// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)

/***************************************************************************
 * Blocking Implementation (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure the completion is visible to the waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Check whether the completion has arrived; pairs with the
		 * smp_store_release() in qed_spq_blocking_cb().
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

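/* Wait for an EBLOCK/BLOCK ramrod to complete. The escalation below is:
 * a short busy-wait poll (unless skip_quick_poll), a longer poll which sleeps
 * between iterations, an MCP drain request if the ramrod is still stuck,
 * one more sleeping poll and, as a last resort, a QED_HW_ERR_RAMROD_FAIL
 * notification.
 */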
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* Send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* Take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* Register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* Reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

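/* qed_spq_alloc() below allocates a coherent array of struct qed_spq_entry;
 * each element carries the ramrod payload (the 'ramrod' member) alongside the
 * driver-side bookkeeping (list linkage, completion callback, priority).
 * qed_spq_setup() above points elem.data_ptr at the DMA address of the ramrod
 * member, so the firmware only ever sees the ramrod data itself.
 */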
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* Allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is taken.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* Entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

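/* Worked example of the completion bitmap below (values are illustrative):
 * with comp_bitmap_idx at 4, completions arriving for echo 6 and echo 5 only
 * set their bits; once echo 4 completes, the loop clears bits 4-6, advances
 * comp_bitmap_idx to 7 and returns three produced elements to the chain.
 */
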
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

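/* Simplified sketch of how a caller is expected to use the posting API above.
 * Illustrative only - real callers go through the qed_sp_* helpers, the
 * cid/cmd_id/protocol_id values are placeholders and all error handling is
 * omitted:
 *
 *	struct qed_spq_entry *p_ent;
 *	u8 fw_ret;
 *
 *	qed_spq_get_entry(p_hwfn, &p_ent);
 *	p_ent->elem.hdr.cid = cpu_to_le32(cid);
 *	p_ent->elem.hdr.cmd_id = cmd_id;
 *	p_ent->elem.hdr.protocol_id = protocol_id;
 *	p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *	p_ent->comp_cb.cookie = &p_ent->comp_done;
 *	qed_spq_post(p_hwfn, p_ent, &fw_ret);
 */
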
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}