/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_service_creq(unsigned long data);

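/*
 * Command flow: each request is stamped with a 16-bit cookie derived
 * from rcfw->seq_num.  The cookie's slot in cmdq_bitmap is set while
 * the command is outstanding and cleared by the CREQ handler when the
 * matching completion arrives; waiters either sleep on rcfw->waitq or,
 * for blocking commands, poll the CREQ directly.
 */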
/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}
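
/*
 * Note: __block_for_resp() busy-waits and drives the CREQ by calling
 * bnxt_qplib_service_creq() itself, presumably so that blocking
 * commands still make progress when the caller cannot sleep and the
 * CREQ tasklet has not had a chance to run.
 */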

static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	/* The cmdq is made up of 16-byte units; a request may consume
	 * one or more cmdqe.
	 */
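	/* For example, with BNXT_QPLIB_CMDQE_UNITS of 16 bytes, a request
	 * with cmd_size == 4 spans four consecutive slots; the copy loop
	 * below walks preq across them one cmdqe at a time.
	 */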
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				  BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= FIRMWARE_FIRST_FLAG;
		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
	}

	/* Make sure the cmdqe is flushed to memory before ringing
	 * the CMDQ doorbell.
	 */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	return 0;
}

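/**
 * bnxt_qplib_rcfw_send_message() - queue a command to the FW and wait
 * @rcfw:	rcfw channel instance
 * @req:	command to be queued (cmdq_base plus opcode-specific body)
 * @resp:	caller-provided buffer that receives the CREQ event
 * @sb:		optional side buffer for large responses, may be NULL
 * @is_block:	when non-zero, poll for the completion instead of sleeping
 *
 * Retries -EAGAIN/-EBUSY from __send_message() before giving up.
 * Returns 0 on success, -ETIMEDOUT if no completion arrived, or
 * -EFAULT if the FW returned a non-zero status.
 */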
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		if (is_block)
			mdelay(1);
		else
			usleep_range(500, 1000);
	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d msec)",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
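
/*
 * A typical caller (e.g. bnxt_qplib_deinit_rcfw() below) prepares a
 * request with RCFW_CMD_PREP() and hands over matching cmdq/creq
 * structures:
 *
 *	struct cmdq_query_func req;
 *	struct creq_query_func_resp resp;
 *	u16 cmd_flags = 0;
 *
 *	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 *					  (void *)&resp, NULL, 0);
 *
 * (Sketch only; the struct names follow the roce_hsi.h convention
 * implied by the QUERY_FUNC opcode used above.)
 */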

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		break;
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}

/* SP - CREQ Completion handlers */
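/*
 * CREQ_CMP_VALID() (from qplib_rcfw.h) decides whether the entry at
 * the consumer index is a new completion; per the usual bnxt
 * completion-ring convention, the CREQE valid bit is expected to be
 * compared against the wrap parity carried in raw_cons, so no
 * producer-index register read is needed while polling.
 */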
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event(rcfw,
					(struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event(rcfw,
					(struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: aeqe:%#x not handled", type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: creqe with op_event = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	switch (pbl->pg_size) {
	case ROCE_PG_SIZE_4K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	case ROCE_PG_SIZE_8K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K;
	case ROCE_PG_SIZE_64K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K;
	case ROCE_PG_SIZE_2M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M;
	case ROCE_PG_SIZE_8M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M;
	case ROCE_PG_SIZE_1G:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G;
	default:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	}
}

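/**
 * bnxt_qplib_init_rcfw() - issue INITIALIZE_FW to bring up the RCFW
 * @rcfw:	rcfw channel instance
 * @ctx:	HW context (QPC/MRW/SRQ/CQ/TIM/TQM backing-store tables)
 * @is_virtfn:	non-zero on a VF, which skips the context programming
 *
 * Programs the page size, indirection level and base PBL address of
 * each context table into the request, then sends it to the FW.
 */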
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

	/*
	 * VFs need not set up the HW context area; the PF sets it up on
	 * the VF's behalf.  Skip the HW programming.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	/* Make sure the HW channel is stopped! */
	synchronize_irq(rcfw->vector);
	tasklet_disable(&rcfw->worker);
	tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

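/**
 * bnxt_qplib_enable_rcfw_channel() - bring up the HW command channel
 * @pdev:	PCI device
 * @rcfw:	rcfw channel instance
 * @msix_vector:	MSI-X vector to service the CREQ
 * @cp_bar_reg_off:	offset of the CREQ doorbell within its BAR
 * @virt_fn:	non-zero on a VF (selects the VF producer-index offset)
 * @aeq_handler:	optional callback for async (function) events
 *
 * Maps the CMDQ and CREQ BAR regions, hooks up the CREQ IRQ, then
 * publishes the CMDQ PBL address, size and CREQ ring id to the FW
 * mailbox via the final __iowrite32_copy(), after which commands may
 * be submitted.
 */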
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      struct creq_func_event *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	rcfw->flags = FIRMWARE_FIRST_FLAG;
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
		    sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
					      RCFW_COMM_BASE_OFFSET,
					      RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;
	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->vector = msix_vector;
	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;

	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
		     (unsigned long)rcfw);

	rcfw->requested = false;
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = %d", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}
	rcfw->requested = true;

	init_waitqueue_head(&rcfw->waitq);

	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

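/*
 * Side buffers: commands whose response exceeds a single CREQE (e.g.
 * query commands) point the FW at a DMA-coherent scratch area via
 * req->resp_addr/resp_size in __send_message().  The helpers below
 * allocate and free that area.
 */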
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
		struct bnxt_qplib_rcfw *rcfw,
		u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}