// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;		\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;		\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}
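/*
 * Illustrative note (not part of the original source): the three *_init_regs
 * helpers above differ only in the register prefix handed to
 * ICE_CQ_INIT_REGS(), which token-pastes the prefix onto the register and
 * mask names. For the admin queue, ICE_CQ_INIT_REGS(cq, PF_FW) expands (in
 * part) to roughly the following, assuming the PF_FW_* offsets and masks come
 * from the driver's register definitions:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len = PF_FW_ATQLEN;
 *	...
 *	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
 */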
/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return -ENOMEM;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return -ENOMEM;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}

static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

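/*
 * Illustrative note (not part of the original source): a worked example of
 * the policy above, assuming a hypothetical expected API version of 1.5
 * (EXP_FW_API_VER_MAJOR.EXP_FW_API_VER_MINOR):
 *  - firmware reporting 2.x: refuse to load (major version too new),
 *  - firmware reporting 1.8: load, but log that the NVM is newer than
 *    expected and recommend a newer driver (minor more than two ahead),
 *  - firmware reporting 1.2: load, but recommend an NVM update (minor more
 *    than two behind),
 *  - firmware reporting 0.x: load, with the same "older NVM" message.
 */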
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = -EBUSY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = -EIO;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return -EINVAL;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return -EIO;
	}

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return hw->mac_type == ICE_MAC_GENERIC;
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != -EIO)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fallback to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

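/*
 * Illustrative note (not part of the original source): a typical caller sets
 * the required sizing fields on each control queue before creating them, as
 * the kernel-doc above requires. The values below are hypothetical
 * placeholders for the example, not the driver's actual defaults:
 *
 *	hw->adminq.num_sq_entries = 32;
 *	hw->adminq.num_rq_entries = 32;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	... same for hw->mailboxq (and hw->sbq where supported) ...
 *
 *	if (ice_create_all_ctrlq(hw))
 *		goto err_init_ctrlq;
 */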
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	struct ice_sq_cd *details;
	unsigned long timeout;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return -EBUSY;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = -EIO;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = -EINVAL;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = -EINVAL;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = -EIO;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = -ENOSPC;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		if (ice_sq_done(hw, cq))
			break;

		usleep_range(ICE_CTL_Q_SQ_CMD_USEC,
			     ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);
	} while (time_before(jiffies, timeout));

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = -EIO;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = -EIO;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = -EIO;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = -EIO;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

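/*
 * Illustrative note (not part of the original source): a minimal direct
 * (buffer-less) command sketch using the send path above. The opcode value is
 * a hypothetical placeholder; real callers use the ice_aqc_opc_* definitions
 * and usually go through wrappers such as ice_aq_send_cmd():
 *
 *	struct ice_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001);
 *	err = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (err)
 *		dev_dbg(ice_hw_to_dev(hw), "AQ cmd failed, last status %d\n",
 *			hw->adminq.sq_last_status);
 */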
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
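/*
 * Illustrative note (not part of the original source): a sketch of how a
 * caller might drain a receive queue with ice_clean_rq_elem(). The buffer
 * size is an arbitrary value chosen for the example, and the driver's real
 * event handling lives elsewhere (e.g. in its service task):
 *
 *	struct ice_rq_event_info event = { };
 *	u8 buf[4096];
 *	u16 pending;
 *
 *	event.buf_len = sizeof(buf);
 *	event.msg_buf = buf;
 *	do {
 *		if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
 *			break;
 *		// process event.desc / event.msg_buf here
 *	} while (pending);
 */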