// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive - check if the Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - configure control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program the base address and length registers for a control queue ring and
 * verify that the base address register took the new value.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa) {			\
			dmam_free_coherent(ice_hw_to_dev(hw),		\
					   (qi)->ring.r.ring##_bi[i].size,\
					   (qi)->ring.r.ring##_bi[i].va,\
					   (qi)->ring.r.ring##_bi[i].pa);\
			(qi)->ring.r.ring##_bi[i].va = NULL;		\
			(qi)->ring.r.ring##_bi[i].pa = 0;		\
			(qi)->ring.r.ring##_bi[i].size = 0;		\
		}							\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
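
/*
 * Illustrative sketch only (not part of the driver): the control queue
 * lifecycle described in the kernel-doc comments above, from a caller's point
 * of view. The function name and the queue/buffer sizes below are assumptions
 * made for the example; a real caller sizes the queues from its own
 * configuration before creating them.
 */
static enum ice_status __maybe_unused
ice_example_ctrlq_lifecycle(struct ice_hw *hw)
{
	enum ice_status status;

	/* the caller must size both queues before creating them */
	hw->adminq.num_sq_entries = 64;		/* example sizes only */
	hw->adminq.num_rq_entries = 64;
	hw->adminq.sq_buf_size = 4096;
	hw->adminq.rq_buf_size = 4096;
	hw->mailboxq.num_sq_entries = 64;
	hw->mailboxq.num_rq_entries = 64;
	hw->mailboxq.sq_buf_size = 4096;
	hw->mailboxq.rq_buf_size = 4096;

	/* once, at load time: create the locks and bring up both queues */
	status = ice_create_all_ctrlq(hw);
	if (status)
		return status;

	/* across a reset: shut down and re-init, keeping the locks */
	ice_shutdown_all_ctrlq(hw);
	status = ice_init_all_ctrlq(hw);
	if (status)
		return status;

	/* once, at unload time: shut down the queues, destroy the locks */
	ice_destroy_all_ctrlq(hw);
	return 0;
}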

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
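
/*
 * Illustrative sketch only (not part of the driver): how a caller typically
 * pairs ice_fill_dflt_direct_cmd_desc() with ice_sq_send_cmd() for a direct
 * (bufferless) admin queue command. The function name and the 0x0001 opcode
 * (get firmware version) are assumptions made for the example; real callers
 * pass an opcode from ice_adminq_cmd.h and normally go through a higher-level
 * wrapper rather than calling ice_sq_send_cmd() on hw->adminq directly.
 */
static enum ice_status __maybe_unused
ice_example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	/* zero the descriptor, set the opcode and the SI flag */
	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001);

	/* direct command: no indirect buffer and no command details */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}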

/**
 * ice_clean_rq_elem - clean one element from the Control Receive Queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
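
/*
 * Illustrative sketch only (not part of the driver): draining pending events
 * from a control queue with ice_clean_rq_elem(). The function name is made up
 * for the example; the caller owns the event buffer and is assumed to have
 * sized it to at least cq->rq_buf_size.
 */
static enum ice_status __maybe_unused
ice_example_drain_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		     u8 *buf, u16 buf_len)
{
	struct ice_rq_event_info event = { };
	enum ice_status status;
	u16 pending = 0;

	event.buf_len = buf_len;
	event.msg_buf = buf;

	do {
		/* one event per call; 'pending' reports what is left */
		status = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (status == ICE_ERR_AQ_NO_WORK)
			break;
		if (status)
			return status;

		/* the received descriptor and message are now in event.desc
		 * and event.msg_buf (event.msg_len bytes); process them here
		 */
	} while (pending);

	return 0;
}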