// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
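
/* Resulting polling budgets: the busy-wait phase lasts at most
 * 10 iterations * 10us = ~100us, and the sleeping phase at most
 * 1000 iterations * 5ms = ~5s, before escalating to an MCP drain.
 */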

/***************************************************************************
* Blocking Implementation (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;

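	/* The 'echo' value is the chain producer index at post time; the FW
	 * echoes it back in the EQE so qed_spq_completion() can match the
	 * event to this entry.
	 */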
	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

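	/* core_db_data packs params, agg_flags and the 16-bit producer into
	 * 32 bits (matching the DB_REC_WIDTH_32B registration done in
	 * qed_spq_setup()), so the whole struct is written as a single u32.
	 */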
	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee that the fw_cons index we use points to a usable
	 * element (to comply with our chain), so that our macros work correctly
	 */
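	/* Example, assuming a hypothetical next-ptr-style layout of 3 usable
	 * plus 1 unusable element per page: fw_cons_idx == 3 satisfies the
	 * mask test below and is bumped to 4, the first element of the next
	 * page. (For chains whose pages have no unusable elements the
	 * adjustment is a no-op.)
	 */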
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
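	/* Each qed_spq_entry embeds its ramrod data; precompute the DMA
	 * address of that embedded area for every entry, so posting later
	 * only needs to copy the prepared element into the ring.
	 */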
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
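	/* Aim the doorbell at the XCM and have it aggregate on the maximal
	 * value, with the SPQ producer selected as the aggregated value
	 * (semantics inferred from the HSI field names).
	 */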
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - Adds a new entry to the pending list.
 *        Should be called while holding the SPQ lock.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			/* An EBLOCK caller frees the allocated p_ent itself
			 * after blocking, so remember the entry that actually
			 * gets posted; for other modes p_ent is never seen
			 * again and can be freed here.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
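/* Post entries from @head onto the chain while leaving at least
 * @keep_reserve elements free, so that high-priority ramrods can still be
 * posted even when the ring is almost full.
 */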
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
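/* Example: if completions arrive for echo values 2, 0 and then 1, the call
 * for echo 2 only marks its bit; the call for echo 0 returns one produced
 * element, and the call for echo 1 returns two more (for 1 and the
 * previously marked 2) as the loop walks the bitmap from comp_bitmap_idx.
 */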
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

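/* Typical flow (sketch): a caller obtains an entry via qed_spq_get_entry()
 * (commonly through qed_sp_init_request() in qed_sp_commands.c), fills
 * p_ent->elem.hdr and the ramrod data, and then calls qed_spq_post().
 */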
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; no need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
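		/* Entries routed through unlimited_pending were not posted
		 * immediately, so the quick busy-wait poll is unlikely to
		 * catch their completion and is skipped.
		 */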
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
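	/* Each ConsQ element is 0x80 bytes, so sizing the chain to
	 * QED_CHAIN_PAGE_SIZE / 0x80 elements fills exactly one page.
	 */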
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}