// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free descriptors */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event q)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @fw_branch: The "branch" of FW, typically describes the device type
 * @fw_major: The major version of the FW API
 * @fw_minor: The minor version increment of the FW API
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
{
	if (fw_branch != EXP_FW_API_VER_BRANCH)
		return false;
	if (fw_major != EXP_FW_API_VER_MAJOR)
		return false;
	if (fw_minor != EXP_FW_API_VER_MINOR)
		return false;
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
			      hw->api_min_ver)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	return ice_init_check_adminq(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
}
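
/* Usage sketch (illustrative only, not part of the driver code): before
 * ice_init_all_ctrlq() is called, the caller is expected to have filled in
 * the queue sizes on hw->adminq; the entry counts and buffer sizes below are
 * hypothetical placeholder values, not constants defined in this file.
 *
 *	hw->adminq.num_sq_entries = 64;
 *	hw->adminq.num_rq_entries = 64;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *
 *	if (ice_init_all_ctrlq(hw))
 *		goto err_init;	 (admin queue unusable or FW API mismatch)
 *	...
 *	ice_shutdown_all_ctrlq(hw);	 (on teardown)
 */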

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		memcpy(details, cd, sizeof(*details));
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
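
/* Usage sketch (illustrative only): a direct admin command, which carries no
 * separate buffer, is typically built with ice_fill_dflt_direct_cmd_desc()
 * and handed to ice_sq_send_cmd(). The opcode value below is a placeholder;
 * real callers pass an opcode constant and fill desc.params for the command.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * On failure, hw->adminq.sq_last_status holds the firmware return code.
 */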

/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
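
/* Usage sketch (illustrative only): a caller normally drains the ARQ in a
 * loop, pointing e.msg_buf at a scratch buffer sized like cq->rq_buf_size and
 * stopping when ice_clean_rq_elem() reports ICE_ERR_AQ_NO_WORK. The buffer
 * allocation and event dispatch below are placeholders for whatever the
 * calling driver code actually does.
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		handle_event(le16_to_cpu(event.desc.opcode), &event);
 *	} while (pending);
 *	kfree(event.msg_buf);
 */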