1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e_type.h" 5 #include "i40e_register.h" 6 #include "i40e_adminq.h" 7 #include "i40e_prototype.h" 8 9 static void i40e_resume_aq(struct i40e_hw *hw); 10 11 /** 12 * i40e_adminq_init_regs - Initialize AdminQ registers 13 * @hw: pointer to the hardware structure 14 * 15 * This assumes the alloc_asq and alloc_arq functions have already been called 16 **/ 17 static void i40e_adminq_init_regs(struct i40e_hw *hw) 18 { 19 /* set head and tail registers in our local struct */ 20 if (i40e_is_vf(hw)) { 21 hw->aq.asq.tail = I40E_VF_ATQT1; 22 hw->aq.asq.head = I40E_VF_ATQH1; 23 hw->aq.asq.len = I40E_VF_ATQLEN1; 24 hw->aq.asq.bal = I40E_VF_ATQBAL1; 25 hw->aq.asq.bah = I40E_VF_ATQBAH1; 26 hw->aq.arq.tail = I40E_VF_ARQT1; 27 hw->aq.arq.head = I40E_VF_ARQH1; 28 hw->aq.arq.len = I40E_VF_ARQLEN1; 29 hw->aq.arq.bal = I40E_VF_ARQBAL1; 30 hw->aq.arq.bah = I40E_VF_ARQBAH1; 31 } else { 32 hw->aq.asq.tail = I40E_PF_ATQT; 33 hw->aq.asq.head = I40E_PF_ATQH; 34 hw->aq.asq.len = I40E_PF_ATQLEN; 35 hw->aq.asq.bal = I40E_PF_ATQBAL; 36 hw->aq.asq.bah = I40E_PF_ATQBAH; 37 hw->aq.arq.tail = I40E_PF_ARQT; 38 hw->aq.arq.head = I40E_PF_ARQH; 39 hw->aq.arq.len = I40E_PF_ARQLEN; 40 hw->aq.arq.bal = I40E_PF_ARQBAL; 41 hw->aq.arq.bah = I40E_PF_ARQBAH; 42 } 43 } 44 45 /** 46 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings 47 * @hw: pointer to the hardware structure 48 **/ 49 static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) 50 { 51 int ret_code; 52 53 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, 54 i40e_mem_atq_ring, 55 (hw->aq.num_asq_entries * 56 sizeof(struct i40e_aq_desc)), 57 I40E_ADMINQ_DESC_ALIGNMENT); 58 if (ret_code) 59 return ret_code; 60 61 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, 62 (hw->aq.num_asq_entries * 63 sizeof(struct i40e_asq_cmd_details))); 64 if (ret_code) { 65 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 66 return ret_code; 67 } 68 69 return ret_code; 70 } 71 72 /** 73 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings 74 * @hw: pointer to the hardware structure 75 **/ 76 static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) 77 { 78 int ret_code; 79 80 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, 81 i40e_mem_arq_ring, 82 (hw->aq.num_arq_entries * 83 sizeof(struct i40e_aq_desc)), 84 I40E_ADMINQ_DESC_ALIGNMENT); 85 86 return ret_code; 87 } 88 89 /** 90 * i40e_free_adminq_asq - Free Admin Queue send rings 91 * @hw: pointer to the hardware structure 92 * 93 * This assumes the posted send buffers have already been cleaned 94 * and de-allocated 95 **/ 96 static void i40e_free_adminq_asq(struct i40e_hw *hw) 97 { 98 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 99 } 100 101 /** 102 * i40e_free_adminq_arq - Free Admin Queue receive rings 103 * @hw: pointer to the hardware structure 104 * 105 * This assumes the posted receive buffers have already been cleaned 106 * and de-allocated 107 **/ 108 static void i40e_free_adminq_arq(struct i40e_hw *hw) 109 { 110 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 111 } 112 113 /** 114 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue 115 * @hw: pointer to the hardware structure 116 **/ 117 static int i40e_alloc_arq_bufs(struct i40e_hw *hw) 118 { 119 struct i40e_aq_desc *desc; 120 struct i40e_dma_mem *bi; 121 int ret_code; 122 int i; 123 124 /* We'll be allocating the buffer info memory first, then we can 125 * allocate the mapped buffers for the event processing 126 */ 127 128 /* buffer_info structures do not need alignment */ 129 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, 130 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); 131 if (ret_code) 132 goto alloc_arq_bufs; 133 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; 134 135 /* allocate the mapped buffers */ 136 for (i = 0; i < hw->aq.num_arq_entries; i++) { 137 bi = &hw->aq.arq.r.arq_bi[i]; 138 ret_code = i40e_allocate_dma_mem(hw, bi, 139 i40e_mem_arq_buf, 140 hw->aq.arq_buf_size, 141 I40E_ADMINQ_DESC_ALIGNMENT); 142 if (ret_code) 143 goto unwind_alloc_arq_bufs; 144 145 /* now configure the descriptors for use */ 146 desc = I40E_ADMINQ_DESC(hw->aq.arq, i); 147 148 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); 149 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 150 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); 151 desc->opcode = 0; 152 /* This is in accordance with Admin queue design, there is no 153 * register for buffer size configuration 154 */ 155 desc->datalen = cpu_to_le16((u16)bi->size); 156 desc->retval = 0; 157 desc->cookie_high = 0; 158 desc->cookie_low = 0; 159 desc->params.external.addr_high = 160 cpu_to_le32(upper_32_bits(bi->pa)); 161 desc->params.external.addr_low = 162 cpu_to_le32(lower_32_bits(bi->pa)); 163 desc->params.external.param0 = 0; 164 desc->params.external.param1 = 0; 165 } 166 167 alloc_arq_bufs: 168 return ret_code; 169 170 unwind_alloc_arq_bufs: 171 /* don't try to free the one that failed... */ 172 i--; 173 for (; i >= 0; i--) 174 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 175 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 176 177 return ret_code; 178 } 179 180 /** 181 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue 182 * @hw: pointer to the hardware structure 183 **/ 184 static int i40e_alloc_asq_bufs(struct i40e_hw *hw) 185 { 186 struct i40e_dma_mem *bi; 187 int ret_code; 188 int i; 189 190 /* No mapped memory needed yet, just the buffer info structures */ 191 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, 192 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); 193 if (ret_code) 194 goto alloc_asq_bufs; 195 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; 196 197 /* allocate the mapped buffers */ 198 for (i = 0; i < hw->aq.num_asq_entries; i++) { 199 bi = &hw->aq.asq.r.asq_bi[i]; 200 ret_code = i40e_allocate_dma_mem(hw, bi, 201 i40e_mem_asq_buf, 202 hw->aq.asq_buf_size, 203 I40E_ADMINQ_DESC_ALIGNMENT); 204 if (ret_code) 205 goto unwind_alloc_asq_bufs; 206 } 207 alloc_asq_bufs: 208 return ret_code; 209 210 unwind_alloc_asq_bufs: 211 /* don't try to free the one that failed... */ 212 i--; 213 for (; i >= 0; i--) 214 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 215 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 216 217 return ret_code; 218 } 219 220 /** 221 * i40e_free_arq_bufs - Free receive queue buffer info elements 222 * @hw: pointer to the hardware structure 223 **/ 224 static void i40e_free_arq_bufs(struct i40e_hw *hw) 225 { 226 int i; 227 228 /* free descriptors */ 229 for (i = 0; i < hw->aq.num_arq_entries; i++) 230 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 231 232 /* free the descriptor memory */ 233 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 234 235 /* free the dma header */ 236 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 237 } 238 239 /** 240 * i40e_free_asq_bufs - Free send queue buffer info elements 241 * @hw: pointer to the hardware structure 242 **/ 243 static void i40e_free_asq_bufs(struct i40e_hw *hw) 244 { 245 int i; 246 247 /* only unmap if the address is non-NULL */ 248 for (i = 0; i < hw->aq.num_asq_entries; i++) 249 if (hw->aq.asq.r.asq_bi[i].pa) 250 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 251 252 /* free the buffer info list */ 253 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); 254 255 /* free the descriptor memory */ 256 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 257 258 /* free the dma header */ 259 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 260 } 261 262 /** 263 * i40e_config_asq_regs - configure ASQ registers 264 * @hw: pointer to the hardware structure 265 * 266 * Configure base address and length registers for the transmit queue 267 **/ 268 static int i40e_config_asq_regs(struct i40e_hw *hw) 269 { 270 int ret_code = 0; 271 u32 reg = 0; 272 273 /* Clear Head and Tail */ 274 wr32(hw, hw->aq.asq.head, 0); 275 wr32(hw, hw->aq.asq.tail, 0); 276 277 /* set starting point */ 278 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | 279 I40E_PF_ATQLEN_ATQENABLE_MASK)); 280 wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); 281 wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); 282 283 /* Check one register to verify that config was applied */ 284 reg = rd32(hw, hw->aq.asq.bal); 285 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) 286 ret_code = -EIO; 287 288 return ret_code; 289 } 290 291 /** 292 * i40e_config_arq_regs - ARQ register configuration 293 * @hw: pointer to the hardware structure 294 * 295 * Configure base address and length registers for the receive (event queue) 296 **/ 297 static int i40e_config_arq_regs(struct i40e_hw *hw) 298 { 299 int ret_code = 0; 300 u32 reg = 0; 301 302 /* Clear Head and Tail */ 303 wr32(hw, hw->aq.arq.head, 0); 304 wr32(hw, hw->aq.arq.tail, 0); 305 306 /* set starting point */ 307 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | 308 I40E_PF_ARQLEN_ARQENABLE_MASK)); 309 wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); 310 wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); 311 312 /* Update tail in the HW to post pre-allocated buffers */ 313 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 314 315 /* Check one register to verify that config was applied */ 316 reg = rd32(hw, hw->aq.arq.bal); 317 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) 318 ret_code = -EIO; 319 320 return ret_code; 321 } 322 323 /** 324 * i40e_init_asq - main initialization routine for ASQ 325 * @hw: pointer to the hardware structure 326 * 327 * This is the main initialization routine for the Admin Send Queue 328 * Prior to calling this function, drivers *MUST* set the following fields 329 * in the hw->aq structure: 330 * - hw->aq.num_asq_entries 331 * - hw->aq.arq_buf_size 332 * 333 * Do *NOT* hold the lock when calling this as the memory allocation routines 334 * called are not going to be atomic context safe 335 **/ 336 static int i40e_init_asq(struct i40e_hw *hw) 337 { 338 int ret_code = 0; 339 340 if (hw->aq.asq.count > 0) { 341 /* queue already initialized */ 342 ret_code = -EBUSY; 343 goto init_adminq_exit; 344 } 345 346 /* verify input for valid configuration */ 347 if ((hw->aq.num_asq_entries == 0) || 348 (hw->aq.asq_buf_size == 0)) { 349 ret_code = -EIO; 350 goto init_adminq_exit; 351 } 352 353 hw->aq.asq.next_to_use = 0; 354 hw->aq.asq.next_to_clean = 0; 355 356 /* allocate the ring memory */ 357 ret_code = i40e_alloc_adminq_asq_ring(hw); 358 if (ret_code) 359 goto init_adminq_exit; 360 361 /* allocate buffers in the rings */ 362 ret_code = i40e_alloc_asq_bufs(hw); 363 if (ret_code) 364 goto init_adminq_free_rings; 365 366 /* initialize base registers */ 367 ret_code = i40e_config_asq_regs(hw); 368 if (ret_code) 369 goto init_adminq_free_rings; 370 371 /* success! */ 372 hw->aq.asq.count = hw->aq.num_asq_entries; 373 goto init_adminq_exit; 374 375 init_adminq_free_rings: 376 i40e_free_adminq_asq(hw); 377 378 init_adminq_exit: 379 return ret_code; 380 } 381 382 /** 383 * i40e_init_arq - initialize ARQ 384 * @hw: pointer to the hardware structure 385 * 386 * The main initialization routine for the Admin Receive (Event) Queue. 387 * Prior to calling this function, drivers *MUST* set the following fields 388 * in the hw->aq structure: 389 * - hw->aq.num_asq_entries 390 * - hw->aq.arq_buf_size 391 * 392 * Do *NOT* hold the lock when calling this as the memory allocation routines 393 * called are not going to be atomic context safe 394 **/ 395 static int i40e_init_arq(struct i40e_hw *hw) 396 { 397 int ret_code = 0; 398 399 if (hw->aq.arq.count > 0) { 400 /* queue already initialized */ 401 ret_code = -EBUSY; 402 goto init_adminq_exit; 403 } 404 405 /* verify input for valid configuration */ 406 if ((hw->aq.num_arq_entries == 0) || 407 (hw->aq.arq_buf_size == 0)) { 408 ret_code = -EIO; 409 goto init_adminq_exit; 410 } 411 412 hw->aq.arq.next_to_use = 0; 413 hw->aq.arq.next_to_clean = 0; 414 415 /* allocate the ring memory */ 416 ret_code = i40e_alloc_adminq_arq_ring(hw); 417 if (ret_code) 418 goto init_adminq_exit; 419 420 /* allocate buffers in the rings */ 421 ret_code = i40e_alloc_arq_bufs(hw); 422 if (ret_code) 423 goto init_adminq_free_rings; 424 425 /* initialize base registers */ 426 ret_code = i40e_config_arq_regs(hw); 427 if (ret_code) 428 goto init_adminq_free_rings; 429 430 /* success! */ 431 hw->aq.arq.count = hw->aq.num_arq_entries; 432 goto init_adminq_exit; 433 434 init_adminq_free_rings: 435 i40e_free_adminq_arq(hw); 436 437 init_adminq_exit: 438 return ret_code; 439 } 440 441 /** 442 * i40e_shutdown_asq - shutdown the ASQ 443 * @hw: pointer to the hardware structure 444 * 445 * The main shutdown routine for the Admin Send Queue 446 **/ 447 static int i40e_shutdown_asq(struct i40e_hw *hw) 448 { 449 int ret_code = 0; 450 451 mutex_lock(&hw->aq.asq_mutex); 452 453 if (hw->aq.asq.count == 0) { 454 ret_code = -EBUSY; 455 goto shutdown_asq_out; 456 } 457 458 /* Stop firmware AdminQ processing */ 459 wr32(hw, hw->aq.asq.head, 0); 460 wr32(hw, hw->aq.asq.tail, 0); 461 wr32(hw, hw->aq.asq.len, 0); 462 wr32(hw, hw->aq.asq.bal, 0); 463 wr32(hw, hw->aq.asq.bah, 0); 464 465 hw->aq.asq.count = 0; /* to indicate uninitialized queue */ 466 467 /* free ring buffers */ 468 i40e_free_asq_bufs(hw); 469 470 shutdown_asq_out: 471 mutex_unlock(&hw->aq.asq_mutex); 472 return ret_code; 473 } 474 475 /** 476 * i40e_shutdown_arq - shutdown ARQ 477 * @hw: pointer to the hardware structure 478 * 479 * The main shutdown routine for the Admin Receive Queue 480 **/ 481 static int i40e_shutdown_arq(struct i40e_hw *hw) 482 { 483 int ret_code = 0; 484 485 mutex_lock(&hw->aq.arq_mutex); 486 487 if (hw->aq.arq.count == 0) { 488 ret_code = -EBUSY; 489 goto shutdown_arq_out; 490 } 491 492 /* Stop firmware AdminQ processing */ 493 wr32(hw, hw->aq.arq.head, 0); 494 wr32(hw, hw->aq.arq.tail, 0); 495 wr32(hw, hw->aq.arq.len, 0); 496 wr32(hw, hw->aq.arq.bal, 0); 497 wr32(hw, hw->aq.arq.bah, 0); 498 499 hw->aq.arq.count = 0; /* to indicate uninitialized queue */ 500 501 /* free ring buffers */ 502 i40e_free_arq_bufs(hw); 503 504 shutdown_arq_out: 505 mutex_unlock(&hw->aq.arq_mutex); 506 return ret_code; 507 } 508 509 /** 510 * i40e_set_hw_flags - set HW flags 511 * @hw: pointer to the hardware structure 512 **/ 513 static void i40e_set_hw_flags(struct i40e_hw *hw) 514 { 515 struct i40e_adminq_info *aq = &hw->aq; 516 517 hw->flags = 0; 518 519 switch (hw->mac.type) { 520 case I40E_MAC_XL710: 521 if (aq->api_maj_ver > 1 || 522 (aq->api_maj_ver == 1 && 523 aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) { 524 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; 525 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 526 /* The ability to RX (not drop) 802.1ad frames */ 527 hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; 528 } 529 break; 530 case I40E_MAC_X722: 531 hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | 532 I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; 533 534 if (aq->api_maj_ver > 1 || 535 (aq->api_maj_ver == 1 && 536 aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) 537 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 538 539 if (aq->api_maj_ver > 1 || 540 (aq->api_maj_ver == 1 && 541 aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) 542 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; 543 544 if (aq->api_maj_ver > 1 || 545 (aq->api_maj_ver == 1 && 546 aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722)) 547 hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE; 548 549 fallthrough; 550 default: 551 break; 552 } 553 554 /* Newer versions of firmware require lock when reading the NVM */ 555 if (aq->api_maj_ver > 1 || 556 (aq->api_maj_ver == 1 && 557 aq->api_min_ver >= 5)) 558 hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; 559 560 if (aq->api_maj_ver > 1 || 561 (aq->api_maj_ver == 1 && 562 aq->api_min_ver >= 8)) { 563 hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; 564 hw->flags |= I40E_HW_FLAG_DROP_MODE; 565 } 566 567 if (aq->api_maj_ver > 1 || 568 (aq->api_maj_ver == 1 && 569 aq->api_min_ver >= 9)) 570 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED; 571 } 572 573 /** 574 * i40e_init_adminq - main initialization routine for Admin Queue 575 * @hw: pointer to the hardware structure 576 * 577 * Prior to calling this function, drivers *MUST* set the following fields 578 * in the hw->aq structure: 579 * - hw->aq.num_asq_entries 580 * - hw->aq.num_arq_entries 581 * - hw->aq.arq_buf_size 582 * - hw->aq.asq_buf_size 583 **/ 584 int i40e_init_adminq(struct i40e_hw *hw) 585 { 586 u16 cfg_ptr, oem_hi, oem_lo; 587 u16 eetrack_lo, eetrack_hi; 588 int retry = 0; 589 int ret_code; 590 591 /* verify input for valid configuration */ 592 if ((hw->aq.num_arq_entries == 0) || 593 (hw->aq.num_asq_entries == 0) || 594 (hw->aq.arq_buf_size == 0) || 595 (hw->aq.asq_buf_size == 0)) { 596 ret_code = -EIO; 597 goto init_adminq_exit; 598 } 599 600 /* Set up register offsets */ 601 i40e_adminq_init_regs(hw); 602 603 /* setup ASQ command write back timeout */ 604 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; 605 606 /* allocate the ASQ */ 607 ret_code = i40e_init_asq(hw); 608 if (ret_code) 609 goto init_adminq_destroy_locks; 610 611 /* allocate the ARQ */ 612 ret_code = i40e_init_arq(hw); 613 if (ret_code) 614 goto init_adminq_free_asq; 615 616 /* There are some cases where the firmware may not be quite ready 617 * for AdminQ operations, so we retry the AdminQ setup a few times 618 * if we see timeouts in this first AQ call. 619 */ 620 do { 621 ret_code = i40e_aq_get_firmware_version(hw, 622 &hw->aq.fw_maj_ver, 623 &hw->aq.fw_min_ver, 624 &hw->aq.fw_build, 625 &hw->aq.api_maj_ver, 626 &hw->aq.api_min_ver, 627 NULL); 628 if (ret_code != -EIO) 629 break; 630 retry++; 631 msleep(100); 632 i40e_resume_aq(hw); 633 } while (retry < 10); 634 if (ret_code != 0) 635 goto init_adminq_free_arq; 636 637 /* Some features were introduced in different FW API version 638 * for different MAC type. 639 */ 640 i40e_set_hw_flags(hw); 641 642 /* get the NVM version info */ 643 i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, 644 &hw->nvm.version); 645 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); 646 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 647 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 648 i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); 649 i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), 650 &oem_hi); 651 i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), 652 &oem_lo); 653 hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; 654 655 if (hw->mac.type == I40E_MAC_XL710 && 656 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 657 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { 658 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; 659 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 660 } 661 if (hw->mac.type == I40E_MAC_X722 && 662 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 663 hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) { 664 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 665 } 666 667 /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ 668 if (hw->aq.api_maj_ver > 1 || 669 (hw->aq.api_maj_ver == 1 && 670 hw->aq.api_min_ver >= 7)) 671 hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; 672 673 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { 674 ret_code = -EIO; 675 goto init_adminq_free_arq; 676 } 677 678 /* pre-emptive resource lock release */ 679 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 680 hw->nvm_release_on_done = false; 681 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 682 683 ret_code = 0; 684 685 /* success! */ 686 goto init_adminq_exit; 687 688 init_adminq_free_arq: 689 i40e_shutdown_arq(hw); 690 init_adminq_free_asq: 691 i40e_shutdown_asq(hw); 692 init_adminq_destroy_locks: 693 694 init_adminq_exit: 695 return ret_code; 696 } 697 698 /** 699 * i40e_shutdown_adminq - shutdown routine for the Admin Queue 700 * @hw: pointer to the hardware structure 701 **/ 702 void i40e_shutdown_adminq(struct i40e_hw *hw) 703 { 704 if (i40e_check_asq_alive(hw)) 705 i40e_aq_queue_shutdown(hw, true); 706 707 i40e_shutdown_asq(hw); 708 i40e_shutdown_arq(hw); 709 710 if (hw->nvm_buff.va) 711 i40e_free_virt_mem(hw, &hw->nvm_buff); 712 } 713 714 /** 715 * i40e_clean_asq - cleans Admin send queue 716 * @hw: pointer to the hardware structure 717 * 718 * returns the number of free desc 719 **/ 720 static u16 i40e_clean_asq(struct i40e_hw *hw) 721 { 722 struct i40e_adminq_ring *asq = &(hw->aq.asq); 723 struct i40e_asq_cmd_details *details; 724 u16 ntc = asq->next_to_clean; 725 struct i40e_aq_desc desc_cb; 726 struct i40e_aq_desc *desc; 727 728 desc = I40E_ADMINQ_DESC(*asq, ntc); 729 details = I40E_ADMINQ_DETAILS(*asq, ntc); 730 while (rd32(hw, hw->aq.asq.head) != ntc) { 731 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, 732 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); 733 734 if (details->callback) { 735 I40E_ADMINQ_CALLBACK cb_func = 736 (I40E_ADMINQ_CALLBACK)details->callback; 737 desc_cb = *desc; 738 cb_func(hw, &desc_cb); 739 } 740 memset(desc, 0, sizeof(*desc)); 741 memset(details, 0, sizeof(*details)); 742 ntc++; 743 if (ntc == asq->count) 744 ntc = 0; 745 desc = I40E_ADMINQ_DESC(*asq, ntc); 746 details = I40E_ADMINQ_DETAILS(*asq, ntc); 747 } 748 749 asq->next_to_clean = ntc; 750 751 return I40E_DESC_UNUSED(asq); 752 } 753 754 /** 755 * i40e_asq_done - check if FW has processed the Admin Send Queue 756 * @hw: pointer to the hw struct 757 * 758 * Returns true if the firmware has processed all descriptors on the 759 * admin send queue. Returns false if there are still requests pending. 760 **/ 761 static bool i40e_asq_done(struct i40e_hw *hw) 762 { 763 /* AQ designers suggest use of head for better 764 * timing reliability than DD bit 765 */ 766 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; 767 768 } 769 770 /** 771 * i40e_asq_send_command_atomic_exec - send command to Admin Queue 772 * @hw: pointer to the hw struct 773 * @desc: prefilled descriptor describing the command (non DMA mem) 774 * @buff: buffer to use for indirect commands 775 * @buff_size: size of buffer for indirect commands 776 * @cmd_details: pointer to command details structure 777 * @is_atomic_context: is the function called in an atomic context? 778 * 779 * This is the main send command driver routine for the Admin Queue send 780 * queue. It runs the queue, cleans the queue, etc 781 **/ 782 static int 783 i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, 784 struct i40e_aq_desc *desc, 785 void *buff, /* can be NULL */ 786 u16 buff_size, 787 struct i40e_asq_cmd_details *cmd_details, 788 bool is_atomic_context) 789 { 790 struct i40e_dma_mem *dma_buff = NULL; 791 struct i40e_asq_cmd_details *details; 792 struct i40e_aq_desc *desc_on_ring; 793 bool cmd_completed = false; 794 u16 retval = 0; 795 int status = 0; 796 u32 val = 0; 797 798 if (hw->aq.asq.count == 0) { 799 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 800 "AQTX: Admin queue not initialized.\n"); 801 status = -EIO; 802 goto asq_send_command_error; 803 } 804 805 hw->aq.asq_last_status = I40E_AQ_RC_OK; 806 807 val = rd32(hw, hw->aq.asq.head); 808 if (val >= hw->aq.num_asq_entries) { 809 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 810 "AQTX: head overrun at %d\n", val); 811 status = -ENOSPC; 812 goto asq_send_command_error; 813 } 814 815 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 816 if (cmd_details) { 817 *details = *cmd_details; 818 819 /* If the cmd_details are defined copy the cookie. The 820 * cpu_to_le32 is not needed here because the data is ignored 821 * by the FW, only used by the driver 822 */ 823 if (details->cookie) { 824 desc->cookie_high = 825 cpu_to_le32(upper_32_bits(details->cookie)); 826 desc->cookie_low = 827 cpu_to_le32(lower_32_bits(details->cookie)); 828 } 829 } else { 830 memset(details, 0, sizeof(struct i40e_asq_cmd_details)); 831 } 832 833 /* clear requested flags and then set additional flags if defined */ 834 desc->flags &= ~cpu_to_le16(details->flags_dis); 835 desc->flags |= cpu_to_le16(details->flags_ena); 836 837 if (buff_size > hw->aq.asq_buf_size) { 838 i40e_debug(hw, 839 I40E_DEBUG_AQ_MESSAGE, 840 "AQTX: Invalid buffer size: %d.\n", 841 buff_size); 842 status = -EINVAL; 843 goto asq_send_command_error; 844 } 845 846 if (details->postpone && !details->async) { 847 i40e_debug(hw, 848 I40E_DEBUG_AQ_MESSAGE, 849 "AQTX: Async flag not set along with postpone flag"); 850 status = -EINVAL; 851 goto asq_send_command_error; 852 } 853 854 /* call clean and check queue available function to reclaim the 855 * descriptors that were processed by FW, the function returns the 856 * number of desc available 857 */ 858 /* the clean function called here could be called in a separate thread 859 * in case of asynchronous completions 860 */ 861 if (i40e_clean_asq(hw) == 0) { 862 i40e_debug(hw, 863 I40E_DEBUG_AQ_MESSAGE, 864 "AQTX: Error queue is full.\n"); 865 status = -ENOSPC; 866 goto asq_send_command_error; 867 } 868 869 /* initialize the temp desc pointer with the right desc */ 870 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 871 872 /* if the desc is available copy the temp desc to the right place */ 873 *desc_on_ring = *desc; 874 875 /* if buff is not NULL assume indirect command */ 876 if (buff != NULL) { 877 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); 878 /* copy the user buff into the respective DMA buff */ 879 memcpy(dma_buff->va, buff, buff_size); 880 desc_on_ring->datalen = cpu_to_le16(buff_size); 881 882 /* Update the address values in the desc with the pa value 883 * for respective buffer 884 */ 885 desc_on_ring->params.external.addr_high = 886 cpu_to_le32(upper_32_bits(dma_buff->pa)); 887 desc_on_ring->params.external.addr_low = 888 cpu_to_le32(lower_32_bits(dma_buff->pa)); 889 } 890 891 /* bump the tail */ 892 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n"); 893 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, 894 buff, buff_size); 895 (hw->aq.asq.next_to_use)++; 896 if (hw->aq.asq.next_to_use == hw->aq.asq.count) 897 hw->aq.asq.next_to_use = 0; 898 if (!details->postpone) 899 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); 900 901 /* if cmd_details are not defined or async flag is not set, 902 * we need to wait for desc write back 903 */ 904 if (!details->async && !details->postpone) { 905 u32 total_delay = 0; 906 907 do { 908 /* AQ designers suggest use of head for better 909 * timing reliability than DD bit 910 */ 911 if (i40e_asq_done(hw)) 912 break; 913 914 if (is_atomic_context) 915 udelay(50); 916 else 917 usleep_range(40, 60); 918 919 total_delay += 50; 920 } while (total_delay < hw->aq.asq_cmd_timeout); 921 } 922 923 /* if ready, copy the desc back to temp */ 924 if (i40e_asq_done(hw)) { 925 *desc = *desc_on_ring; 926 if (buff != NULL) 927 memcpy(buff, dma_buff->va, buff_size); 928 retval = le16_to_cpu(desc->retval); 929 if (retval != 0) { 930 i40e_debug(hw, 931 I40E_DEBUG_AQ_MESSAGE, 932 "AQTX: Command completed with error 0x%X.\n", 933 retval); 934 935 /* strip off FW internal code */ 936 retval &= 0xff; 937 } 938 cmd_completed = true; 939 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) 940 status = 0; 941 else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) 942 status = -EBUSY; 943 else 944 status = -EIO; 945 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 946 } 947 948 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, 949 "AQTX: desc and buffer writeback:\n"); 950 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); 951 952 /* save writeback aq if requested */ 953 if (details->wb_desc) 954 *details->wb_desc = *desc_on_ring; 955 956 /* update the error if time out occurred */ 957 if ((!cmd_completed) && 958 (!details->async && !details->postpone)) { 959 if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { 960 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 961 "AQTX: AQ Critical error.\n"); 962 status = -EIO; 963 } else { 964 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 965 "AQTX: Writeback timeout.\n"); 966 status = -EIO; 967 } 968 } 969 970 asq_send_command_error: 971 return status; 972 } 973 974 /** 975 * i40e_asq_send_command_atomic - send command to Admin Queue 976 * @hw: pointer to the hw struct 977 * @desc: prefilled descriptor describing the command (non DMA mem) 978 * @buff: buffer to use for indirect commands 979 * @buff_size: size of buffer for indirect commands 980 * @cmd_details: pointer to command details structure 981 * @is_atomic_context: is the function called in an atomic context? 982 * 983 * Acquires the lock and calls the main send command execution 984 * routine. 985 **/ 986 int 987 i40e_asq_send_command_atomic(struct i40e_hw *hw, 988 struct i40e_aq_desc *desc, 989 void *buff, /* can be NULL */ 990 u16 buff_size, 991 struct i40e_asq_cmd_details *cmd_details, 992 bool is_atomic_context) 993 { 994 int status; 995 996 mutex_lock(&hw->aq.asq_mutex); 997 status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size, 998 cmd_details, 999 is_atomic_context); 1000 1001 mutex_unlock(&hw->aq.asq_mutex); 1002 return status; 1003 } 1004 1005 int 1006 i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, 1007 void *buff, /* can be NULL */ u16 buff_size, 1008 struct i40e_asq_cmd_details *cmd_details) 1009 { 1010 return i40e_asq_send_command_atomic(hw, desc, buff, buff_size, 1011 cmd_details, false); 1012 } 1013 1014 /** 1015 * i40e_asq_send_command_atomic_v2 - send command to Admin Queue 1016 * @hw: pointer to the hw struct 1017 * @desc: prefilled descriptor describing the command (non DMA mem) 1018 * @buff: buffer to use for indirect commands 1019 * @buff_size: size of buffer for indirect commands 1020 * @cmd_details: pointer to command details structure 1021 * @is_atomic_context: is the function called in an atomic context? 1022 * @aq_status: pointer to Admin Queue status return value 1023 * 1024 * Acquires the lock and calls the main send command execution 1025 * routine. Returns the last Admin Queue status in aq_status 1026 * to avoid race conditions in access to hw->aq.asq_last_status. 1027 **/ 1028 int 1029 i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, 1030 struct i40e_aq_desc *desc, 1031 void *buff, /* can be NULL */ 1032 u16 buff_size, 1033 struct i40e_asq_cmd_details *cmd_details, 1034 bool is_atomic_context, 1035 enum i40e_admin_queue_err *aq_status) 1036 { 1037 int status; 1038 1039 mutex_lock(&hw->aq.asq_mutex); 1040 status = i40e_asq_send_command_atomic_exec(hw, desc, buff, 1041 buff_size, 1042 cmd_details, 1043 is_atomic_context); 1044 if (aq_status) 1045 *aq_status = hw->aq.asq_last_status; 1046 mutex_unlock(&hw->aq.asq_mutex); 1047 return status; 1048 } 1049 1050 int 1051 i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, 1052 void *buff, /* can be NULL */ u16 buff_size, 1053 struct i40e_asq_cmd_details *cmd_details, 1054 enum i40e_admin_queue_err *aq_status) 1055 { 1056 return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size, 1057 cmd_details, true, aq_status); 1058 } 1059 1060 /** 1061 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function 1062 * @desc: pointer to the temp descriptor (non DMA mem) 1063 * @opcode: the opcode can be used to decide which flags to turn off or on 1064 * 1065 * Fill the desc with default values 1066 **/ 1067 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 1068 u16 opcode) 1069 { 1070 /* zero out the desc */ 1071 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 1072 desc->opcode = cpu_to_le16(opcode); 1073 desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); 1074 } 1075 1076 /** 1077 * i40e_clean_arq_element 1078 * @hw: pointer to the hw struct 1079 * @e: event info from the receive descriptor, includes any buffers 1080 * @pending: number of events that could be left to process 1081 * 1082 * This function cleans one Admin Receive Queue element and returns 1083 * the contents through e. It can also return how many events are 1084 * left to process through 'pending' 1085 **/ 1086 int i40e_clean_arq_element(struct i40e_hw *hw, 1087 struct i40e_arq_event_info *e, 1088 u16 *pending) 1089 { 1090 u16 ntc = hw->aq.arq.next_to_clean; 1091 struct i40e_aq_desc *desc; 1092 struct i40e_dma_mem *bi; 1093 int ret_code = 0; 1094 u16 desc_idx; 1095 u16 datalen; 1096 u16 flags; 1097 u16 ntu; 1098 1099 /* pre-clean the event info */ 1100 memset(&e->desc, 0, sizeof(e->desc)); 1101 1102 /* take the lock before we start messing with the ring */ 1103 mutex_lock(&hw->aq.arq_mutex); 1104 1105 if (hw->aq.arq.count == 0) { 1106 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 1107 "AQRX: Admin queue not initialized.\n"); 1108 ret_code = -EIO; 1109 goto clean_arq_element_err; 1110 } 1111 1112 /* set next_to_use to head */ 1113 ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; 1114 if (ntu == ntc) { 1115 /* nothing to do - shouldn't need to update ring's values */ 1116 ret_code = -EALREADY; 1117 goto clean_arq_element_out; 1118 } 1119 1120 /* now clean the next descriptor */ 1121 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); 1122 desc_idx = ntc; 1123 1124 hw->aq.arq_last_status = 1125 (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); 1126 flags = le16_to_cpu(desc->flags); 1127 if (flags & I40E_AQ_FLAG_ERR) { 1128 ret_code = -EIO; 1129 i40e_debug(hw, 1130 I40E_DEBUG_AQ_MESSAGE, 1131 "AQRX: Event received with error 0x%X.\n", 1132 hw->aq.arq_last_status); 1133 } 1134 1135 e->desc = *desc; 1136 datalen = le16_to_cpu(desc->datalen); 1137 e->msg_len = min(datalen, e->buf_len); 1138 if (e->msg_buf != NULL && (e->msg_len != 0)) 1139 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, 1140 e->msg_len); 1141 1142 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); 1143 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, 1144 hw->aq.arq_buf_size); 1145 1146 /* Restore the original datalen and buffer address in the desc, 1147 * FW updates datalen to indicate the event message 1148 * size 1149 */ 1150 bi = &hw->aq.arq.r.arq_bi[ntc]; 1151 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 1152 1153 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); 1154 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 1155 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); 1156 desc->datalen = cpu_to_le16((u16)bi->size); 1157 desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); 1158 desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); 1159 1160 /* set tail = the last cleaned desc index. */ 1161 wr32(hw, hw->aq.arq.tail, ntc); 1162 /* ntc is updated to tail + 1 */ 1163 ntc++; 1164 if (ntc == hw->aq.num_arq_entries) 1165 ntc = 0; 1166 hw->aq.arq.next_to_clean = ntc; 1167 hw->aq.arq.next_to_use = ntu; 1168 1169 i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc); 1170 clean_arq_element_out: 1171 /* Set pending if needed, unlock and return */ 1172 if (pending) 1173 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 1174 clean_arq_element_err: 1175 mutex_unlock(&hw->aq.arq_mutex); 1176 1177 return ret_code; 1178 } 1179 1180 static void i40e_resume_aq(struct i40e_hw *hw) 1181 { 1182 /* Registers are reset after PF reset */ 1183 hw->aq.asq.next_to_use = 0; 1184 hw->aq.asq.next_to_clean = 0; 1185 1186 i40e_config_asq_regs(hw); 1187 1188 hw->aq.arq.next_to_use = 0; 1189 hw->aq.arq.next_to_clean = 0; 1190 1191 i40e_config_arq_regs(hw); 1192 } 1193