/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */

/* Sleep on the RCFW wait queue until the command tracked by @cookie
 * completes or RCFW_CMD_WAIT_TIME_MS elapses.
 */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
        u16 cbit;
        int rc;

        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        rc = wait_event_timeout(rcfw->waitq,
                                !test_bit(cbit, rcfw->cmdq_bitmap),
                                msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
        return rc ? 0 : -ETIMEDOUT;
}

/* Busy-poll variant for callers that cannot sleep: drive the CREQ
 * handler directly, waiting roughly RCFW_BLOCKED_CMD_WAIT_COUNT
 * milliseconds for the command tracked by @cookie to complete.
 */
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
        u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
        u16 cbit;

        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (!test_bit(cbit, rcfw->cmdq_bitmap))
                goto done;
        do {
                mdelay(1); /* 1 msec */
                bnxt_qplib_service_creq((unsigned long)rcfw);
        } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
        return count ? 0 : -ETIMEDOUT;
}
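
/* Post one command to the CMDQ under cmdq->lock. The request is tagged
 * with a cookie derived from rcfw->seq_num so the CREQ handler can match
 * the eventual response, copied into the ring in 16-byte cmdqe units,
 * and made visible to firmware by ringing the CMDQ doorbell. A return
 * value of 0 only means the command was submitted; the caller still has
 * to wait for completion via __wait_for_resp()/__block_for_resp().
 */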
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
                          struct creq_base *resp, void *sb, u8 is_block)
{
        struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
        struct bnxt_qplib_crsq *crsqe;
        u32 sw_prod, cmdq_prod;
        unsigned long flags;
        u32 size, opcode;
        u16 cookie, cbit;
        u8 *preq;

        opcode = req->opcode;
        if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
             opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: RCFW not initialized, reject opcode 0x%x",
                        opcode);
                return -EINVAL;
        }

        if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
                return -EINVAL;
        }

        if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
                return -ETIMEDOUT;

        /* Cmdq elements are in 16-byte units; each request can consume
         * one or more cmdqe.
         */
        spin_lock_irqsave(&cmdq->lock, flags);
        if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
                spin_unlock_irqrestore(&cmdq->lock, flags);
                return -EAGAIN;
        }

        cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (is_block)
                cookie |= RCFW_CMD_IS_BLOCKING;

        set_bit(cbit, rcfw->cmdq_bitmap);
        req->cookie = cpu_to_le16(cookie);
        crsqe = &rcfw->crsqe_tbl[cbit];
        if (crsqe->resp) {
                spin_unlock_irqrestore(&cmdq->lock, flags);
                return -EBUSY;
        }
        memset(resp, 0, sizeof(*resp));
        crsqe->resp = (struct creq_qp_event *)resp;
        crsqe->resp->cookie = req->cookie;
        crsqe->req_size = req->cmd_size;
        if (req->resp_size && sb) {
                struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

                req->resp_addr = cpu_to_le64(sbuf->dma_addr);
                req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
                                 BNXT_QPLIB_CMDQE_UNITS;
        }

        cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
        preq = (u8 *)req;
        size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
        do {
                /* Locate the next cmdq slot */
                sw_prod = HWQ_CMP(cmdq->prod, cmdq);
                cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
                if (!cmdqe) {
                        dev_err(&rcfw->pdev->dev,
                                "QPLIB: RCFW request failed with no cmdqe!");
                        goto done;
                }
                /* Copy a segment of the req cmd to the cmdq */
                memset(cmdqe, 0, sizeof(*cmdqe));
                memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
                preq += min_t(u32, size, sizeof(*cmdqe));
                size -= min_t(u32, size, sizeof(*cmdqe));
                cmdq->prod++;
                rcfw->seq_num++;
        } while (size > 0);

        rcfw->seq_num++;

        cmdq_prod = cmdq->prod;
        if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
                /* The very first doorbell write is required to set this
                 * flag, which prompts the FW to reset its internal pointers.
                 */
                cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
                clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
        }

        /* Order the cmdqe writes before ringing the CMDQ doorbell */
        wmb();
        writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
                          rcfw->cmdq_bar_reg_prod_off);
        writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
                                   rcfw->cmdq_bar_reg_trig_off);
done:
        spin_unlock_irqrestore(&cmdq->lock, flags);
        return 0;
}
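
/* Send a command to the firmware and wait for the matching CREQ
 * response. Submission is retried (up to 0xFF times) while the CMDQ is
 * full (-EAGAIN) or the cookie slot is busy (-EBUSY); the wait is a
 * busy-poll when @is_block is set and a sleeping wait otherwise. A
 * timeout poisons the channel via FIRMWARE_TIMED_OUT; a non-zero status
 * in the response is reported as -EFAULT.
 */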
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
                                 struct cmdq_base *req,
                                 struct creq_base *resp,
                                 void *sb, u8 is_block)
{
        struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
        u16 cookie;
        u8 opcode, retry_cnt = 0xFF;
        int rc = 0;

        do {
                opcode = req->opcode;
                rc = __send_message(rcfw, req, resp, sb, is_block);
                cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
                if (!rc)
                        break;

                if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
                        /* send failed */
                        dev_err(&rcfw->pdev->dev,
                                "QPLIB: cmdq[%#x]=%#x send failed",
                                cookie, opcode);
                        return rc;
                }
                is_block ? mdelay(1) : usleep_range(500, 1000);

        } while (retry_cnt--);

        if (is_block)
                rc = __block_for_resp(rcfw, cookie);
        else
                rc = __wait_for_resp(rcfw, cookie);
        if (rc) {
                /* timed out */
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: cmdq[%#x]=%#x timed out (%d) msec",
                        cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
                set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
                return rc;
        }

        if (evnt->status) {
                /* failed with status */
                dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
                        cookie, opcode, evnt->status);
                rc = -EFAULT;
        }

        return rc;
}

/* Completions */
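/* Handle a FUNC (asynchronous error) event from the CREQ. These events
 * carry no per-resource handle, so they are only consumed and counted
 * here; an unrecognized event code is reported back as -EINVAL so the
 * caller can log it.
 */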
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
                                         struct creq_func_event *func_event)
{
        switch (func_event->event) {
        case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
                /* SRQ ctx error, call srq_handler??
                 * But there's no SRQ handle!
                 */
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
                break;
        case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/* Handle a QP event from the CREQ: either an asynchronous QP error
 * notification, which marks the affected QP as being in error under its
 * CQ locks, or a command response, which is matched to its originator
 * by cookie and copied into the caller's response buffer.
 */
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
                                       struct creq_qp_event *qp_event)
{
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
        struct creq_qp_error_notification *err_event;
        struct bnxt_qplib_crsq *crsqe;
        unsigned long flags;
        struct bnxt_qplib_qp *qp;
        u16 cbit, blocked = 0;
        u16 cookie;
        __le16 mcookie;
        u32 qp_id;

        switch (qp_event->event) {
        case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
                err_event = (struct creq_qp_error_notification *)qp_event;
                qp_id = le32_to_cpu(err_event->xid);
                dev_dbg(&rcfw->pdev->dev,
                        "QPLIB: Received QP error notification");
                dev_dbg(&rcfw->pdev->dev,
                        "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
                        qp_id, err_event->req_err_state_reason,
                        err_event->res_err_state_reason);
                /* qp_id comes from firmware; validate it before indexing
                 * the QP table.
                 */
                if (qp_id >= rcfw->qp_tbl_size)
                        break;
                qp = rcfw->qp_tbl[qp_id].qp_handle;
                if (!qp)
                        break;
                bnxt_qplib_acquire_cq_locks(qp, &flags);
                bnxt_qplib_mark_qp_error(qp);
                bnxt_qplib_release_cq_locks(qp, &flags);
                break;
        default:
                /* Command Response */
                spin_lock_irqsave(&cmdq->lock, flags);
                cookie = le16_to_cpu(qp_event->cookie);
                mcookie = qp_event->cookie;
                blocked = cookie & RCFW_CMD_IS_BLOCKING;
                cookie &= RCFW_MAX_COOKIE_VALUE;
                cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
                crsqe = &rcfw->crsqe_tbl[cbit];
                if (crsqe->resp &&
                    crsqe->resp->cookie == mcookie) {
                        memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
                        crsqe->resp = NULL;
                } else {
                        dev_err(&rcfw->pdev->dev,
                                "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
                                crsqe->resp ? "mismatch" : "collision",
                                crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
                }
                if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
                        dev_warn(&rcfw->pdev->dev,
                                 "QPLIB: CMD bit %d was not requested", cbit);
                cmdq->cons += crsqe->req_size;
                crsqe->req_size = 0;

                if (!blocked)
                        wake_up(&rcfw->waitq);
                spin_unlock_irqrestore(&cmdq->lock, flags);
        }
        return 0;
}

/* SP - CREQ Completion handlers */
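/* Tasklet body: drain up to CREQ_ENTRY_POLL_BUDGET entries from the
 * CREQ, dispatching QP events and FUNC events to their handlers, then
 * advance the consumer index and re-arm the CREQ doorbell.
 */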
static void bnxt_qplib_service_creq(unsigned long data)
{
        struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
        struct bnxt_qplib_hwq *creq = &rcfw->creq;
        struct creq_base *creqe, **creq_ptr;
        u32 sw_cons, raw_cons;
        unsigned long flags;
        u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

        /* Service the CREQ until the budget is exhausted */
        spin_lock_irqsave(&creq->lock, flags);
        raw_cons = creq->cons;
        while (budget > 0) {
                sw_cons = HWQ_CMP(raw_cons, creq);
                creq_ptr = (struct creq_base **)creq->pbl_ptr;
                creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
                if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
                        break;
                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = creqe->type & CREQ_BASE_TYPE_MASK;
                switch (type) {
                case CREQ_BASE_TYPE_QP_EVENT:
                        bnxt_qplib_process_qp_event
                                (rcfw, (struct creq_qp_event *)creqe);
                        rcfw->creq_qp_event_processed++;
                        break;
                case CREQ_BASE_TYPE_FUNC_EVENT:
                        if (!bnxt_qplib_process_func_event
                            (rcfw, (struct creq_func_event *)creqe))
                                rcfw->creq_func_event_processed++;
                        else
                                dev_warn(&rcfw->pdev->dev,
                                         "QPLIB: aeqe: %#x Not handled",
                                         type);
                        break;
                default:
                        dev_warn(&rcfw->pdev->dev,
                                 "QPLIB: creqe with op_event = 0x%x not handled",
                                 type);
                        break;
                }
                raw_cons++;
                budget--;
        }

        if (creq->cons != raw_cons) {
                creq->cons = raw_cons;
                CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
                              creq->max_elements);
        }
        spin_unlock_irqrestore(&creq->lock, flags);
}

/* CREQ interrupt handler: prefetch the next CREQ entry and defer the
 * actual processing to the bnxt_qplib_service_creq() tasklet.
 */
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
        struct bnxt_qplib_rcfw *rcfw = dev_instance;
        struct bnxt_qplib_hwq *creq = &rcfw->creq;
        struct creq_base **creq_ptr;
        u32 sw_cons;

        /* Prefetch the CREQ element */
        sw_cons = HWQ_CMP(creq->cons, creq);
        creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
        prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

        tasklet_schedule(&rcfw->worker);

        return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
        struct cmdq_deinitialize_fw req;
        struct creq_deinitialize_fw_resp resp;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, 0);
        if (rc)
                return rc;

        clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
}

/* Map a PBL page size to the corresponding INITIALIZE_FW page-size
 * encoding; defaults to 4K for unrecognized sizes.
 */
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
        return (pbl->pg_size == ROCE_PG_SIZE_4K ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
                pbl->pg_size == ROCE_PG_SIZE_8K ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
                pbl->pg_size == ROCE_PG_SIZE_64K ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
                pbl->pg_size == ROCE_PG_SIZE_2M ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
                pbl->pg_size == ROCE_PG_SIZE_8M ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
                pbl->pg_size == ROCE_PG_SIZE_1G ?
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
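
/* Issue CMDQ_INITIALIZE_FW to hand the firmware the context backing
 * stores (QPC/MRW/SRQ/CQ/TIM/TQM page tables), the resource counts, and
 * the per-VF limits. On success the channel is marked with
 * FIRMWARE_INITIALIZED_FLAG so subsequent commands are accepted.
 */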
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
                         struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
        struct cmdq_initialize_fw req;
        struct creq_initialize_fw_resp resp;
        u16 cmd_flags = 0, level;
        int rc;

        RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

        /* VFs need not set up the HW context area; the PF sets it up on
         * the VF's behalf, so skip the HW programming here.
         */
        if (is_virtfn)
                goto skip_ctx_setup;

        level = ctx->qpc_tbl.level;
        req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
        level = ctx->mrw_tbl.level;
        req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
        level = ctx->srqc_tbl.level;
        req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
        level = ctx->cq_tbl.level;
        req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
        level = ctx->tim_tbl.level;
        req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
        level = ctx->tqm_pde_level;
        req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

        req.qpc_page_dir =
                cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.mrw_page_dir =
                cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.srq_page_dir =
                cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.cq_page_dir =
                cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.tim_page_dir =
                cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.tqm_page_dir =
                cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

        req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
        req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
        req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
        req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

        req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
        req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
        req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
        req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
        req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
        req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, 0);
        if (rc)
                return rc;
        set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
        kfree(rcfw->qp_tbl);
        kfree(rcfw->crsqe_tbl);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
        rcfw->pdev = NULL;
}
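
/* Allocate the HW channel resources: the CREQ and CMDQ hardware queues,
 * the command-response tracking table, and the QP handle table used by
 * the error-notification path. Everything is undone by
 * bnxt_qplib_free_rcfw_channel() on failure.
 */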
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
                                  struct bnxt_qplib_rcfw *rcfw,
                                  int qp_tbl_sz)
{
        rcfw->pdev = pdev;
        rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
        if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
                                      &rcfw->creq.max_elements,
                                      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
                                      HWQ_TYPE_L2_CMPL)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: HW channel CREQ allocation failed");
                goto fail;
        }
        rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
        if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
                                      &rcfw->cmdq.max_elements,
                                      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
                                      HWQ_TYPE_CTX)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: HW channel CMDQ allocation failed");
                goto fail;
        }

        rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
                                  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
        if (!rcfw->crsqe_tbl)
                goto fail;

        rcfw->qp_tbl_size = qp_tbl_sz;
        rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
                               GFP_KERNEL);
        if (!rcfw->qp_tbl)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_rcfw_channel(rcfw);
        return -ENOMEM;
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
        unsigned long indx;

        /* Make sure the HW channel is stopped! */
        synchronize_irq(rcfw->vector);
        tasklet_disable(&rcfw->worker);
        tasklet_kill(&rcfw->worker);

        if (rcfw->requested) {
                free_irq(rcfw->vector, rcfw);
                rcfw->requested = false;
        }
        if (rcfw->cmdq_bar_reg_iomem)
                iounmap(rcfw->cmdq_bar_reg_iomem);
        rcfw->cmdq_bar_reg_iomem = NULL;

        if (rcfw->creq_bar_reg_iomem)
                iounmap(rcfw->creq_bar_reg_iomem);
        rcfw->creq_bar_reg_iomem = NULL;

        /* find_first_bit() takes the size in bits, so scan the full
         * range of outstanding-command bits rather than the bitmap's
         * byte count.
         */
        indx = find_first_bit(rcfw->cmdq_bitmap, RCFW_MAX_OUTSTANDING_CMD);
        if (indx != RCFW_MAX_OUTSTANDING_CMD)
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
        kfree(rcfw->cmdq_bitmap);
        rcfw->bmap_size = 0;

        rcfw->aeq_handler = NULL;
        rcfw->vector = 0;
}
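
/* Bring up the HW channel: map the CMDQ producer/trigger registers and
 * the CREQ doorbell, hook the CREQ MSI-X vector up to the service
 * tasklet, arm the CREQ doorbell, and hand the firmware the CMDQ ring
 * descriptor (struct cmdq_init) through its mailbox.
 */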
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
                                   struct bnxt_qplib_rcfw *rcfw,
                                   int msix_vector,
                                   int cp_bar_reg_off, int virt_fn,
                                   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
                                                      struct creq_func_event *))
{
        resource_size_t res_base;
        struct cmdq_init init;
        u16 bmap_size;
        int rc;

        /* General */
        rcfw->seq_num = 0;
        set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
        bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
                    sizeof(unsigned long);
        rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
        if (!rcfw->cmdq_bitmap)
                return -ENOMEM;
        rcfw->bmap_size = bmap_size;

        /* CMDQ */
        rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
        res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
        if (!res_base)
                return -ENOMEM;

        rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
                                                   RCFW_COMM_BASE_OFFSET,
                                                   RCFW_COMM_SIZE);
        if (!rcfw->cmdq_bar_reg_iomem) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CMDQ BAR region %d mapping failed",
                        rcfw->cmdq_bar_reg);
                return -ENOMEM;
        }

        rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
                                                RCFW_PF_COMM_PROD_OFFSET;

        rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

        /* CREQ */
        rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
        res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
        if (!res_base)
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CREQ BAR region %d resc start is 0!",
                        rcfw->creq_bar_reg);
        rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
                                                   4);
        if (!rcfw->creq_bar_reg_iomem) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CREQ BAR region %d mapping failed",
                        rcfw->creq_bar_reg);
                return -ENOMEM;
        }
        rcfw->creq_qp_event_processed = 0;
        rcfw->creq_func_event_processed = 0;

        rcfw->vector = msix_vector;
        if (aeq_handler)
                rcfw->aeq_handler = aeq_handler;

        tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
                     (unsigned long)rcfw);

        rcfw->requested = false;
        rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
                         "bnxt_qplib_creq", rcfw);
        if (rc) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
                bnxt_qplib_disable_rcfw_channel(rcfw);
                return rc;
        }
        rcfw->requested = true;

        init_waitqueue_head(&rcfw->waitq);

        CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

        init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
        init.cmdq_size_cmdq_lvl = cpu_to_le16(
                ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
                 CMDQ_INIT_CMDQ_SIZE_MASK) |
                ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
                 CMDQ_INIT_CMDQ_LVL_MASK));
        init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

        /* Write to the Bono mailbox register */
        __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
        return 0;
}

/* Allocate a DMA-coherent side buffer for commands whose response
 * payload is returned via DMA rather than inside the CREQ entry.
 */
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
                                struct bnxt_qplib_rcfw *rcfw,
                                u32 size)
{
        struct bnxt_qplib_rcfw_sbuf *sbuf;

        sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
        if (!sbuf)
                return NULL;

        sbuf->size = size;
        sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
                                       &sbuf->dma_addr, GFP_ATOMIC);
        if (!sbuf->sb)
                goto bail;

        return sbuf;
bail:
        kfree(sbuf);
        return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
                               struct bnxt_qplib_rcfw_sbuf *sbuf)
{
        if (sbuf->sb)
                dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
                                  sbuf->sb, sbuf->dma_addr);
        kfree(sbuf);
}