/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */

/* Sleep until the completion for @cookie clears its bit in cmdq_bitmap,
 * or until RCFW_CMD_WAIT_TIME_MS expires.
 */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

/* Busy-wait variant of __wait_for_resp() for contexts that cannot sleep:
 * poll the CREQ directly once per millisecond until the command completes
 * or the retry budget runs out.
 */
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}

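/* Post a single RCFW command to the CMDQ: validate the firmware state,
 * reserve a cookie slot in crsqe_tbl, copy the request into the ring in
 * BNXT_QPLIB_CMDQE_UNITS-sized chunks, and ring the CMDQ doorbell.  The
 * caller waits for the completion separately; the response is delivered
 * through the crsqe entry reserved here.
 */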
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The caller retrieves the CREQ response via the crsqe entry */
	return 0;
}

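/* Post a command and wait for its CREQ completion.  Transient
 * -EAGAIN/-EBUSY failures from __send_message() are retried up to 255
 * times; blocking callers busy-poll via __block_for_resp() while
 * sleeping callers wait on the RCFW waitqueue.  A nonzero firmware
 * status in the completion event is reported as -EFAULT.
 */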
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

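/* A QP-event CREQ entry is either an asynchronous QP error notification
 * or the completion of an earlier command.  Error notifications mark the
 * QP in error under the CQ locks; command completions are matched by
 * cookie against crsqe_tbl, copied out for the requester, and any
 * sleeping waiter is woken.
 */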
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_acquire_cq_locks(qp, &flags);
		bnxt_qplib_mark_qp_error(qp);
		bnxt_qplib_release_cq_locks(qp, &flags);
		break;
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}

/* SP - CREQ Completion handlers */
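/* Tasklet body for the CREQ: drain up to CREQ_ENTRY_POLL_BUDGET valid
 * entries, dispatch each by type, then re-arm the CREQ doorbell with the
 * updated consumer index.
 */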
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event(rcfw,
						    (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event(rcfw,
							   (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB:aeqe:%#x Not handled", type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: creqe with op_event = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

/* Map a PBL page size onto the page-size encoding expected by the
 * INITIALIZE_FW command; unknown sizes fall back to 4K.
 */
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
		pbl->pg_size == ROCE_PG_SIZE_8K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
		pbl->pg_size == ROCE_PG_SIZE_64K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
		pbl->pg_size == ROCE_PG_SIZE_2M ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
		pbl->pg_size == ROCE_PG_SIZE_8M ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
		pbl->pg_size == ROCE_PG_SIZE_1G ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}

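/* Issue the INITIALIZE_FW command, describing the context tables (page
 * directory, level and page size per table) to the firmware.  VFs skip
 * the context programming since the PF sets this area up on their
 * behalf.
 */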
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

	/*
	 * VFs need not set up the HW context area; the PF sets it up on
	 * the VF's behalf, so skip the HW programming here.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

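/* Allocate the HW channel: the CREQ and CMDQ rings plus the host-side
 * tracking tables (crsqe_tbl, one entry per outstanding command, and
 * qp_tbl, which maps XIDs back to QP handles for error notifications).
 */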
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  int qp_tbl_sz)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	/* Make sure the HW channel is stopped! */
	synchronize_irq(rcfw->vector);
	tasklet_disable(&rcfw->worker);
	tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

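/* Bring up the HW channel: map the CMDQ and CREQ BAR regions, hook the
 * MSI-X vector to the CREQ tasklet, arm the CREQ doorbell, and announce
 * the CMDQ geometry to firmware through the mailbox register.
 */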
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	/* Allocate one tracking bit per outstanding command; bmap_size is
	 * kept in bits so that find_first_bit() in
	 * bnxt_qplib_disable_rcfw_channel() scans the whole bitmap.
	 */
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
		    sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = RCFW_MAX_OUTSTANDING_CMD;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
						RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->vector = msix_vector;
	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;

	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
		     (unsigned long)rcfw);

	rcfw->requested = false;
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}
	rcfw->requested = true;

	init_waitqueue_head(&rcfw->waitq);

	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}