/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

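/* Poll the completion flag set by qed_spq_blocking_cb(), either
 * busy-waiting or sleeping between iterations according to
 * 'sleep_between_iter'.
 */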
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we received the completion update; the acquire
		 * pairs with the smp_store_release() in qed_spq_blocking_cb()
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

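/* Wait for a blocking ramrod to complete: start with a short busy-wait
 * poll (unless skipped), fall back to polling with sleeps, and as a last
 * resort request an MCP drain and poll one more time.
 */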
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
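/* Prepare an entry for posting: BLOCK/EBLOCK entries get the blocking
 * completion callback, while CB entries keep the caller-provided callback.
 */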
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   le32_to_cpu(p_ent->elem.hdr.cid),
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   le32_to_cpu(p_ent->elem.data_ptr.hi),
		   le32_to_cpu(p_ent->elem.data_ptr.lo),
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
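/* Initialize the SPQ's CORE connection context: enable the required
 * XSTORM aggregation flags and set the physical queue as well as the base
 * addresses of the SPQ chain and the consolidation queue.
 */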
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure the doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
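/* Dispatch an async event-ring entry to the completion callback
 * registered for its protocol, if one exists.
 */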
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
* EQ API
***************************************************************************/
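/* Let the FW know which EQ entries were consumed by updating the EQE
 * consumer index in USTORM internal RAM.
 */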
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element of the chain (i.e., skip the per-page unusable elements),
	 * so that the chain macros work correctly.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

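/* Get an SPQ entry, preferably from the free pool; if the pool is empty,
 * allocate an entry destined for the unlimited_pending list.
 */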
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* The EBLOCK flow is responsible for freeing the
			 * allocated p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
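/* Post entries from 'head' onto the SPQ chain, keeping 'keep_reserve'
 * chain elements free for high-priority ramrods.
 */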
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

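/* Refill the pending queue from the unlimited_pending list as free pool
 * entries become available, then post the pending queue onto the chain.
 */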
int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

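/* While recovery is in progress, fake a successful FW return code for
 * RDMA ramrods so the caller's flow can complete without error handling.
 */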
static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

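/* Post a single ramrod: queue the entry, post as many pending entries as
 * the chain allows and, for EBLOCK entries, wait for the completion here.
 */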
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

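/* Handle a ramrod completion from the EQ: locate the matching entry on
 * the completion_pending list by its echo value, update the completion
 * bitmap and invoke the entry's completion callback.
 */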
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize the ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}