/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}
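
/* Note: the smp_store_release() above orders the fw_return_code write before
 * the 'done' store; it pairs with the READ_ONCE() polling of comp_done->done
 * in __qed_spq_block() below, so the waiter only reads fw_return_code after
 * observing 'done' set.
 */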

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (READ_ONCE(comp_done->done) == 1) {
			/* Read updated FW return value */
			smp_read_barrier_depends();
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	int rc;

	/* A relatively short polling period without sleeping, to allow the FW
	 * to complete the ramrod and thus possibly avoid the sleeping
	 * iterations that follow.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}
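
/* Rough polling budget implied by the constants above (an estimate only;
 * msleep() may sleep longer than requested):
 *   quick poll: SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10us ~= 100us
 *   sleep poll: SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5ms ~= 5s
 * so a ramrod is given roughly 5 seconds (plus another ~5s after an MCP drain)
 * before being reported as stuck.
 */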

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16				pq;
	struct qed_cxt_info		cxt_info;
	struct core_conn_context	*p_cxt;
	union qed_qm_pq_params		pq_params;
	int				rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}
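
/* The 'echo' captured from the chain producer index in qed_spq_hw_post() is
 * echoed back by the FW in the corresponding event ring element, and is what
 * qed_spq_completion() later uses to locate the entry on the
 * completion_pending list.
 */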

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	case PROTOCOLID_ISCSI:
		if (!IS_ENABLED(CONFIG_QED_ISCSI))
			return -EINVAL;
		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);

			qed_ooo_release_connection_isles(p_hwfn,
							 p_hwfn->p_ooo_info,
							 cid);
			return 0;
		}

		if (p_hwfn->p_iscsi_info->event_cb) {
			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

			return p_iscsi->event_cb(p_iscsi->event_context,
						 p_eqe->opcode, &p_eqe->data);
		} else {
			DP_NOTICE(p_hwfn,
				  "iSCSI async completion is not set\n");
			return -EINVAL;
		}
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so that our macros work as
	 * expected.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
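
/* Illustrative only - a minimal sketch (not taken from the driver) of how a
 * caller could drive the SPQ in EBLOCK mode using the accessors in this file.
 * Real callers go through the qed_sp_* init helpers elsewhere in the driver,
 * which also build the ramrod header/data and point comp_cb.cookie at a
 * struct qed_spq_comp_done before posting:
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret = 0;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc)
 *		return rc;
 *
 *	p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
 *	... fill p_ent->elem.hdr, p_ent->ramrod and p_ent->comp_cb.cookie ...
 *
 *	rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 *
 * With EBLOCK, qed_spq_post() itself waits for the completion and returns the
 * entry to the free pool (or frees it), so the caller must not touch p_ent
 * afterwards.
 */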

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
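			/* Illustrative example: if completions arrive for
			 * echoes 2, 0 and 1 in that order, echo 2 only sets
			 * its bit; when echo 0 arrives, bit 0 is cleared and
			 * one producer credit is returned; when echo 1
			 * arrives, bits 1 and 2 are cleared and two more
			 * credits are returned.
			 */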
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}