xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_spq.c (revision e983940270f10fe8551baf0098be76ea478294a3)
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
#endif

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Implementation (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done			= 0x1;
	comp_done->fw_return_code	= fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

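/* Wait for the blocking callback above to flag completion. Each of the two
 * polling loops below waits up to SPQ_BLOCK_SLEEP_LENGTH iterations of
 * 5-10ms each; if the first loop times out, an MCP drain is requested and
 * the wait is retried before giving up with -EBUSY.
 */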
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
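/* Program the XSTORM context of the SPQ connection: enable the relevant
 * aggregation flags, assign the LB-TC physical queue, and write the SPQ and
 * ConsQ chain base addresses.
 */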
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16				pq;
	struct qed_cxt_info		cxt_info;
	struct core_conn_context	*p_cxt;
	union qed_qm_pq_params		pq_params;
	int				rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

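/* Copy a single SPQ element into the chain and ring the XCM doorbell so
 * firmware picks it up. The chain producer index is stored in the ramrod
 * header's 'echo' field and is later used to match the completion EQE.
 */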
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell has been rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}


/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
#endif
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

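/* EQ handler registered on the slowpath status block: consume EQ entries up
 * to the firmware consumer index, dispatch async events to
 * qed_async_event_completion() and everything else to qed_spq_completion(),
 * then let firmware know how far we got via qed_eq_prod_update().
 */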
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
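/* (Re)initialize the SPQ: build the free pool from the pre-allocated entry
 * array, clear the statistics and the completion bitmap, acquire the SPQ CID
 * and program its context, then reset the underlying chain.
 */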
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
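/* Post entries from 'head' to the hardware as long as the chain has more
 * than 'keep_reserve' free elements; posted entries are moved to the
 * completion_pending list until their EQE arrives.
 */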
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

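/* Move entries from unlimited_pending into the regular pending list while
 * free-pool entries are available, then post the pending list while keeping
 * the default high-priority reserve in the chain.
 */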
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

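/* Post a single ramrod: fill the entry, queue it under the SPQ lock and try
 * to post whatever is pending. For EBLOCK mode the caller is then blocked
 * via qed_spq_block() until the completion (or a timeout) arrives.
 */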
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; no need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

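/* EQE handler for slowpath ramrods: match the EQE echo against the
 * completion_pending list, return chain credit for in-order completions via
 * the completion bitmap, invoke the entry's callback and then try to post
 * any pending requests.
 */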
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release the lock before the callback, as the callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}
895