// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive - Check if Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - program a control queue ring's registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clear the head and tail registers, program the ring length and base
 * address, then read back one register to verify the configuration stuck.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return hw->mac_type == ICE_MAC_GENERIC;
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fall back to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

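/*
 * Editorial note: a sketch of the control queue lifecycle expected by the
 * kernel-doc above and below. The exact call sites in the driver are an
 * assumption; error handling is omitted.
 *
 *	probe:	ice_create_all_ctrlq(hw);	creates locks, then inits queues
 *	reset:	ice_shutdown_all_ctrlq(hw);	locks survive the reset
 *		ice_init_all_ctrlq(hw);		re-init without touching locks
 *	remove:	ice_destroy_all_ctrlq(hw);	shuts down queues, destroys locks
 */
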
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

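/*
 * Editorial sketch (not part of the driver): the general caller pattern for
 * ice_sq_send_cmd() with a direct (no buffer) command, built with the helper
 * ice_fill_dflt_direct_cmd_desc() below. The opcode shown is illustrative;
 * real callers such as ice_aq_get_fw_ver() supply their own opcode and, for
 * indirect commands, a buffer and buffer size.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (status)
 *		return status;
 */
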
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
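
/*
 * Editorial sketch (not part of the driver): draining a receive queue with
 * ice_clean_rq_elem(). The event buffer sizing and the calling context are
 * assumptions for illustration only; error handling is minimal.
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		// handle le16_to_cpu(event.desc.opcode) here
 *	} while (pending);
 *	kfree(event.msg_buf);
 */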