// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
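
/* Rough polling budget implied by the constants above (informational note,
 * not from the original source): the busy-wait pass in __qed_spq_block()
 * polls for up to SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US =
 * 10 * 10us = ~100us, while the sleeping pass waits for up to
 * SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5ms = ~5s before
 * qed_spq_block() resorts to an MCP drain.
 */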

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update; pairs with the
		 * smp_store_release() in qed_spq_blocking_cb().
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}
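
/* Illustrative caller flow for the blocking (EBLOCK) path above. This is a
 * hedged sketch, not code from this file: it assumes the qed_sp_init_request()
 * helper and struct qed_sp_init_data declared in qed_sp.h, and an arbitrary
 * ramrod opcode/protocol pair.
 *
 *	struct qed_sp_init_data init_data = { 0 };
 *	struct qed_spq_entry *p_ent;
 *	int rc;
 *
 *	init_data.cid = qed_spq_get_cid(p_hwfn);
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 *
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
 *				 PROTOCOLID_COMMON, &init_data);
 *	if (rc)
 *		return rc;
 *
 *	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 *
 * In EBLOCK mode qed_spq_post() waits for the EQ completion via
 * qed_spq_block() before returning, so the ramrod has either completed or
 * timed out by the time rc is checked.
 */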

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}
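
/* Illustrative registration of an async-event handler with the SPQ. This is
 * a hedged sketch, not code from this file: the handler name is hypothetical;
 * only the register/unregister calls and the callback argument list (taken
 * from the invocation in qed_async_event_completion() above) are real.
 *
 *	static int my_proto_async_event(struct qed_hwfn *p_hwfn, u8 opcode,
 *					__le16 echo,
 *					union event_ring_data *data,
 *					u8 fw_return_code)
 *	{
 *		// handle the protocol-specific EQ element here
 *		return 0;
 *	}
 *
 *	// during protocol start:
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *				  my_proto_async_event);
 *	// during protocol stop:
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 */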

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = num_elem,
		.elem_size = sizeof(union event_ring_element),
	};
	struct qed_eq *p_eq;
	int ret;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);

	return ret;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);
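
	/* Note (informational, not from the original source): the entries
	 * initialized below live in the single coherent buffer allocated in
	 * qed_spq_alloc(); each entry's elem.data_ptr is pointed at the
	 * 'ramrod' member of that same entry, which is why p_phys starts at
	 * offsetof(struct qed_spq_entry, ramrod) and advances by
	 * sizeof(struct qed_spq_entry) per iteration.
	 */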
	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_SINGLE,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.elem_size = sizeof(struct slow_path_element),
	};
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;
	int ret;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
		goto spq_chain_alloc_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	ret = -ENOMEM;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_alloc_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_alloc_fail:
	qed_chain_free(cdev, &p_spq->chain);
spq_chain_alloc_fail:
	kfree(p_spq);

	return ret;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
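
/* Summary of the SPQ entry life cycle (informational note, not from the
 * original source): an entry is taken from free_pool by qed_spq_get_entry()
 * (or kzalloc'ed and destined for unlimited_pending when the pool is empty),
 * queued by qed_spq_add_entry(), moved to completion_pending and written to
 * the ring by qed_spq_post()/qed_spq_hw_post(), and finally handed back to
 * free_pool via qed_spq_return_entry() once qed_spq_completion() matches its
 * echo value.
 */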

/**
 * qed_spq_add_entry() - Add a new entry to the pending list.
 * Should be used while lock is being held.
 *
 * @p_hwfn: HW device data.
 * @p_ent: An entry to add.
 * @priority: Desired priority.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * Return: zero on success, -EINVAL on invalid @priority.
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}
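
/* Worked example (informational note, not from the original source): assume
 * completions arrive out of order for echo values 2, 0 and 1, with
 * comp_bitmap_idx == 0. The completion for echo 2 only sets bit 2, so no ring
 * elements are returned yet. The completion for echo 0 sets bit 0 and the
 * loop returns one element, clearing bit 0 and advancing comp_bitmap_idx to 1.
 * The completion for echo 1 sets bit 1 and the loop then returns two more
 * elements (for echoes 1 and 2), leaving comp_bitmap_idx at 3.
 */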

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
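
/* Sizing note (informational, not from the original source): assuming the
 * usual 4 KiB chain page (QED_CHAIN_PAGE_SIZE == 0x1000), num_elems below
 * works out to 0x1000 / 0x80 = 32 ConsQ elements.
 */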

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
		.elem_size = QED_SPQ_CONSQ_ELEM_SIZE,
	};
	struct qed_consq *p_consq;
	int ret;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
		goto consq_alloc_fail;
	}

	p_hwfn->p_consq = p_consq;

	return 0;

consq_alloc_fail:
	kfree(p_consq);

	return ret;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}