// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
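
/*
 * For example, ICE_CQ_INIT_REGS(cq, PF_FW) below pastes the PF_FW prefix
 * onto every register name, i.e. it expands to assignments such as:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
 *	cq->rq.tail = PF_FW_ARQT;
 *
 * which lets the same queue code drive either the AdminQ (PF_FW_*) or the
 * PF-VF Mailbox (PF_MBX_*) register set.
 */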

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - setup registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program the base address and length registers for the ring and read one
 * register back to verify that the configuration took effect.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}
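
/*
 * Note on the receive tail write above (an interpretation, not stated in the
 * hardware comment): writing tail = num_rq_entries - 1 hands the pre-armed
 * event descriptors to firmware while effectively holding one slot back, the
 * usual convention in this family of rings so that head == tail can only
 * mean "no events pending" rather than "ring completely full".
 */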
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa) {			\
			dmam_free_coherent(ice_hw_to_dev(hw),		\
					   (qi)->ring.r.ring##_bi[i].size, \
					   (qi)->ring.r.ring##_bi[i].va, \
					   (qi)->ring.r.ring##_bi[i].pa); \
			(qi)->ring.r.ring##_bi[i].va = NULL;		\
			(qi)->ring.r.ring##_bi[i].pa = 0;		\
			(qi)->ring.r.ring##_bi[i].size = 0;		\
		}							\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free dma head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}
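
/*
 * Version compatibility policy implemented by ice_aq_ver_check() below: the
 * driver refuses to load only when firmware reports a *major* AQ API version
 * newer than the one the driver was built against (EXP_FW_API_VER_MAJOR).
 * A matching major version whose minor version differs by more than two, or
 * an older major version, still loads but logs a message asking the user to
 * update either the NVM image or the driver.
 */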
/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}
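
/*
 * Illustrative caller-side setup (a sketch, not code from this file): before
 * ice_init_all_ctrlq() is called, the core driver is expected to have sized
 * both control queues, along these lines (the constant names here are
 * placeholders):
 *
 *	hw->adminq.num_sq_entries = ICE_AQ_LEN;
 *	hw->adminq.num_rq_entries = ICE_AQ_LEN;
 *	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
 *	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
 *	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 *	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 */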
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
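
/*
 * A worked example of the free-count math above (a sketch; the actual
 * ICE_CTL_Q_DESC_UNUSED() macro lives in ice_controlq.h and is assumed here
 * to use the conventional "count + next_to_clean - next_to_use - 1" ring
 * arithmetic of this driver family): with a 64-entry SQ, next_to_clean = 10
 * and next_to_use = 12, two descriptors are still owned by firmware, so the
 * clean routine reports 64 - 2 - 1 = 61 usable slots. The extra "- 1" keeps
 * one slot in reserve so a completely full ring is never mistaken for an
 * empty one.
 */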
/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
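
/*
 * Typical use of the two routines above (a sketch only; real callers use an
 * ice_aqc_opc_* opcode and usually go through a small ice_aq_* wrapper):
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * On success the descriptor is written back in place, so the caller can read
 * the firmware's response fields directly from 'desc'.
 */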
/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
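
/*
 * Typical receive-side use (a sketch only; the service task in the core
 * driver looks roughly like this):
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		// handle event.desc / event.msg_buf here
 *	} while (pending);
 *	kfree(event.msg_buf);
 *
 * ICE_ERR_AQ_NO_WORK (returned once the queue is drained) and any other
 * error both terminate the loop above.
 */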