/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
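/*
 * Each command carries a cookie derived from rcfw->seq_num; the cookie
 * modulo the CMDQ depth selects a bit in rcfw->cmdq_bitmap and a slot
 * in rcfw->crsqe_tbl. The bit is set when the command is posted and is
 * cleared by the CREQ handler when the matching response arrives, which
 * is what the waiters below test for.
 */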
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % rcfw->cmdq_depth;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

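/*
 * Variant of the wait above for callers that cannot sleep: poll the
 * completion bit, servicing the CREQ directly between 1 msec delays.
 */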
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % rcfw->cmdq_depth;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}

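/*
 * Post a single command: validate the firmware state, reserve a cookie
 * and a response slot, copy the request into one or more CMDQ elements
 * and ring the doorbell. The response is reaped later from the CREQ.
 */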
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	u32 cmdq_depth = rcfw->cmdq_depth;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"RCFW not initialized, reject opcode 0x%x\n", opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* CMDQ elements are 16 bytes each; a request can consume one or
	 * more cmdqe.
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % rcfw->cmdq_depth;
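	/* A blocking command is tagged in the cookie itself so that the
	 * completion handler knows not to wake the waitq for it.
	 */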
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				  BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
				[get_cmdq_idx(sw_prod, cmdq_depth)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"RCFW request failed with no cmdqe!\n");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* Ring the CMDQ doorbell: the barrier ensures the cmdqe writes
	 * above are visible before the device sees the new producer index.
	 */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The response will be reaped from the CREQ */
	return 0;
}

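/*
 * Send a command and wait for its response. -EAGAIN/-EBUSY from the
 * send path (CMDQ full, response slot busy) is retried; blocking
 * callers poll the CREQ, everyone else sleeps on the waitq.
 *
 * Typical usage, as in bnxt_qplib_deinit_rcfw() below:
 *
 *	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 *					  (void *)&resp, NULL, 0);
 */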
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
				cookie, opcode);
			return rc;
		}
		if (is_block)
			mdelay(1);
		else
			usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timed out (%d msec)\n",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/* Completions */
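/*
 * Function-level async events are only acknowledged and counted here;
 * no recovery is attempted for any of them.
 */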
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

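/*
 * A QP event is either an async error notification for a QP, which is
 * forwarded to the aeq_handler, or the response to a posted command,
 * which is matched back to its crsqe slot by cookie.
 */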
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"Received QP error notification\n");
		dev_dbg(&rcfw->pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 */
		spin_lock_irqsave_nested(&cmdq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % rcfw->cmdq_depth;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"CMD %s cookie sent=%#x, recd=%#x\n",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0,
				mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "CMD bit %d was not requested\n", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}

/* SP - CREQ Completion handlers */
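/*
 * Tasklet body: drain up to CREQ_ENTRY_POLL_BUDGET entries from the
 * CREQ, dispatch them by type and, if anything was consumed, re-arm the
 * CREQ doorbell so the device can raise the next interrupt.
 */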
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;

	/* Service the CREQ until the budget is exhausted */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
					      raw_cons, creq->max_elements,
					      rcfw->creq_ring_id, gen_p5);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

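/*
 * Map a PBL page size onto the INITIALIZE_FW page-size encoding; 4K is
 * the fallback for anything unrecognized.
 */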
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	switch (pbl->pg_size) {
	case ROCE_PG_SIZE_4K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	case ROCE_PG_SIZE_8K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K;
	case ROCE_PG_SIZE_64K:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K;
	case ROCE_PG_SIZE_2M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M;
	case ROCE_PG_SIZE_8M:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M;
	case ROCE_PG_SIZE_1G:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G;
	default:
		return CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K;
	}
}

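/*
 * INITIALIZE_FW hands the backing-store page tables (QPC, MRW, SRQ, CQ,
 * TIM, TQM) to the firmware. VFs skip the context programming because
 * the PF sets it up on their behalf; Gen P5 chips skip it because the
 * L2 driver already allocates this memory for RoCE.
 */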
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Gen P5 devices don't require this allocation, as the L2
	 * driver does the same for RoCE as well. Also, VFs need not
	 * set up the HW context area; the PF sets up this area for
	 * the VF. Skip the HW programming in both cases.
	 */
	if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz)
{
	u8 hwq_type;

	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS,
				      0, PAGE_SIZE, hwq_type)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}
	if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK)
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256;
	else
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;

	rcfw->cmdq.max_elements = rcfw->cmdq_depth;
	if (bnxt_qplib_alloc_init_hwq
			(rcfw->pdev, &rcfw->cmdq, NULL,
			 &rcfw->cmdq.max_elements,
			 BNXT_QPLIB_CMDQE_UNITS, 0,
			 bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
			 HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

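/*
 * Quiesce the CREQ interrupt path: stop the tasklet, mask the interrupt
 * by ringing the CREQ doorbell without re-arming it, wait out any
 * handler still running, then release the vector.
 */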
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);

	tasklet_disable(&rcfw->worker);
	/* Mask h/w interrupts */
	bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
				rcfw->creq.max_elements, rcfw->creq_ring_id,
				gen_p5);
	/* Sync with last running IRQ-handler */
	synchronize_irq(rcfw->vector);
	if (kill)
		tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(rcfw->cmdq_bar_reg_iomem);
	iounmap(rcfw->creq_bar_reg_iomem);

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"disabling RCFW with pending cmd-bit %lx\n", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->cmdq_bar_reg_iomem = NULL;
	rcfw->creq_bar_reg_iomem = NULL;
	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

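/*
 * need_init distinguishes first-time setup (tasklet_init) from
 * re-arming after bnxt_qplib_rcfw_stop_irq() (tasklet_enable), e.g.
 * when moving to a different MSI-X vector.
 */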
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
	int rc;

	if (rcfw->requested)
		return -EFAULT;

	rcfw->vector = msix_vector;
	if (need_init)
		tasklet_init(&rcfw->worker,
			     bnxt_qplib_service_creq, (unsigned long)rcfw);
	else
		tasklet_enable(&rcfw->worker);
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	rcfw->requested = true;
	bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
				      rcfw->creq.cons, rcfw->creq.max_elements,
				      rcfw->creq_ring_id, gen_p5);

	return 0;
}

int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
					      RCFW_COMM_BASE_OFFSET,
					      RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"CREQ BAR region %d resc start is 0!\n",
			rcfw->creq_bar_reg);
	/* Unconditionally map 8 bytes to support 57500 series */
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   8);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
			rcfw->creq_bar_reg);
		iounmap(rcfw->cmdq_bar_reg_iomem);
		rcfw->cmdq_bar_reg_iomem = NULL;
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;
	init_waitqueue_head(&rcfw->waitq);

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

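/*
 * Side buffers are DMA-coherent scratch areas for commands whose
 * response data does not fit in a creq_base; GFP_ATOMIC is used so
 * callers in atomic context are safe.
 */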
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
		struct bnxt_qplib_rcfw *rcfw,
		u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
				      &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}