/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}
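
/* Command/response bookkeeping in brief (summary comment added for
 * clarity): every command posted to the CMDQ carries a 16-bit cookie
 * derived from rcfw->seq_num. Bit (cookie % RCFW_MAX_OUTSTANDING_CMD)
 * of cmdq_bitmap is set when the command is posted and cleared by the
 * CREQ handler when the matching completion arrives. __wait_for_resp()
 * sleeps on rcfw->waitq until that bit clears, while __block_for_resp()
 * polls the CREQ itself for callers that cannot sleep.
 */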
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"RCFW not initialized, reject opcode 0x%x\n", opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq elements are in 16-byte units; each request can consume
	 * one or more cmdqe.
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"RCFW request failed with no cmdqe!\n");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* Make sure the cmdqe writes are visible before ringing the CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The response is reaped asynchronously from the CREQ */
	return 0;
}
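
/* Illustrative caller sketch (not part of the original file; mirrors the
 * QUERY_FUNC flow in qplib_sp.c). Commands that return more data than a
 * 16-byte CREQ event pass a side buffer from bnxt_qplib_rcfw_alloc_sbuf():
 *
 *	struct cmdq_query_func req;
 *	struct creq_query_func_resp resp;
 *	struct bnxt_qplib_rcfw_sbuf *sbuf;
 *	u16 cmd_flags = 0;
 *	int rc;
 *
 *	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
 *	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw,
 *			sizeof(struct creq_query_func_resp_sb));
 *	if (!sbuf)
 *		return -ENOMEM;
 *	req.resp_size = sizeof(struct creq_query_func_resp_sb) /
 *			BNXT_QPLIB_CMDQE_UNITS;
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 *					  (void *)&resp, (void *)sbuf, 0);
 *	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 *
 * bnxt_qplib_rcfw_send_message() below wraps __send_message() with
 * bounded retries on -EAGAIN/-EBUSY, then waits (or, for is_block
 * callers, polls) for the firmware completion.
 */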
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev,
			"cmdq[%#x]=%#x timed out (%d msec)\n",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
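
/* Note (added for clarity): function-level async events are only
 * acknowledged and counted above. QP events below carry either an async
 * QP error notification or, in the default case, the completion of a
 * command posted through __send_message().
 */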
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"Received QP error notification\n");
		dev_dbg(&rcfw->pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 */

		spin_lock_irqsave_nested(&cmdq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			if (crsqe->resp && crsqe->resp->cookie)
				dev_err(&rcfw->pdev->dev,
					"CMD %s cookie sent=%#x, recd=%#x\n",
					crsqe->resp ? "mismatch" : "collision",
					crsqe->resp ? crsqe->resp->cookie : 0,
					mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "CMD bit %d was not requested\n", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}
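
/* Lock ordering note (added for clarity): the CREQ service routine below
 * runs with creq->lock held and, for command completions, nests
 * cmdq->lock inside it, hence the SINGLE_DEPTH_NESTING annotation in
 * bnxt_qplib_process_qp_event() above.
 */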
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

/* Map a PBL page size to the firmware page-size encoding; unknown sizes
 * fall back to 4K.
 */
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	switch (pbl->pg_size) {
	case ROCE_PG_SIZE_4K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	case ROCE_PG_SIZE_8K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K;
	case ROCE_PG_SIZE_64K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K;
	case ROCE_PG_SIZE_2M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M;
	case ROCE_PG_SIZE_8M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M;
	case ROCE_PG_SIZE_1G:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G;
	default:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	}
}
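
/* Summary (added for clarity): the INITIALIZE_FW request below describes
 * each host-resident context table (QPC, MRW, SRQ, CQ, TIM, TQM) to
 * firmware as a base page-directory address plus a packed byte combining
 * the table's PBL indirection level with the page-size code from
 * __get_pbl_pg_idx().
 */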
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/* VFs need not set up the HW context area; the PF sets it up on
	 * the VF's behalf, so skip the HW programming here.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}
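
/* Note (added for clarity): bnxt_qplib_free_rcfw_channel() above also
 * serves as the unwind path for a failed bnxt_qplib_alloc_rcfw_channel()
 * below, so it must be safe to call with only some of the resources
 * allocated.
 */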
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  int qp_tbl_sz)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	tasklet_disable(&rcfw->worker);
	/* Mask h/w interrupts */
	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		rcfw->creq.max_elements);
	/* Sync with last running IRQ-handler */
	synchronize_irq(rcfw->vector);
	if (kill)
		tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(rcfw->cmdq_bar_reg_iomem);
	iounmap(rcfw->creq_bar_reg_iomem);

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"disabling RCFW with pending cmd-bit %lx\n", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->cmdq_bar_reg_iomem = NULL;
	rcfw->creq_bar_reg_iomem = NULL;
	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	int rc;

	if (rcfw->requested)
		return -EFAULT;

	rcfw->vector = msix_vector;
	if (need_init)
		tasklet_init(&rcfw->worker,
			     bnxt_qplib_service_creq, (unsigned long)rcfw);
	else
		tasklet_enable(&rcfw->worker);
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	rcfw->requested = true;
	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		      rcfw->creq.max_elements);

	return 0;
}
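
/* Channel bring-up order (summary added for clarity):
 * bnxt_qplib_enable_rcfw_channel() allocates the outstanding-command
 * bitmap, maps the CMDQ producer/trigger doorbells and the CREQ consumer
 * doorbell from their PCI BARs, hooks the CREQ MSI-X vector via
 * bnxt_qplib_rcfw_start_irq(), and finally hands the CMDQ ring to
 * firmware by writing struct cmdq_init into the mailbox region.
 */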
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	/* Allocate enough longs to hold RCFW_MAX_OUTSTANDING_CMD bits;
	 * rcfw->bmap_size records the bit count so that find_first_bit()
	 * in bnxt_qplib_disable_rcfw_channel() scans the whole bitmap.
	 */
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
		    sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = RCFW_MAX_OUTSTANDING_CMD;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
						RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base) {
		dev_err(&rcfw->pdev->dev,
			"CREQ BAR region %d resc start is 0!\n",
			rcfw->creq_bar_reg);
		iounmap(rcfw->cmdq_bar_reg_iomem);
		rcfw->cmdq_bar_reg_iomem = NULL;
		return -ENOMEM;
	}
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
			rcfw->creq_bar_reg);
		iounmap(rcfw->cmdq_bar_reg_iomem);
		rcfw->cmdq_bar_reg_iomem = NULL;
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;
	init_waitqueue_head(&rcfw->waitq);

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}