/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking implementation (BLOCK/EBLOCK modes)
***************************************************************************/
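/* Completion callback used by BLOCK/EBLOCK ramrods. It runs from the EQ
 * completion flow and merely flags the qed_spq_comp_done cookie, so the
 * thread polling in qed_spq_block() can pick up the firmware return code.
 */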
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done			= 0x1;
	comp_done->fw_return_code	= fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

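/* Poll for the completion flag set by qed_spq_blocking_cb(). If the ramrod
 * hasn't completed within SPQ_BLOCK_SLEEP_LENGTH iterations of 5-10ms each,
 * ask the management FW (MCP) to drain pending completions and poll for one
 * more round before giving up with -EBUSY.
 */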
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* make sure we read an up-to-date completion flag */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* make sure we read an up-to-date completion flag */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	/* A completion may have raced with the final timeout */
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
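/* Prepare an entry for posting: BLOCK/EBLOCK entries get the blocking
 * callback above as their completion function; CB entries are assumed to
 * carry a caller-provided callback in comp_cb.
 */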
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
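/* Program the SPQ's CORE connection context: enable the relevant XSTORM
 * aggregation flags, assign the loopback-TC physical queue, and write the
 * SPQ ring and consolidation queue base addresses into the context.
 */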
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	struct qed_cxt_info cxt_info;
	u16 pq;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

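/* Copy a single slow-path element into the chain and ring the XCM doorbell.
 * The pre-production producer index is saved in the 'echo' field; firmware
 * echoes it back in the completion, which is how qed_spq_completion() later
 * matches the completed entry.
 */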
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* make sure the producer is up-to-date */
	rmb();

	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* do not reorder */
	barrier();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell is rung */
	mmiowb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
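/* Dispatch an asynchronous (unsolicited) event-ring entry to its owner;
 * only PROTOCOLID_COMMON events (IOV-related) are currently expected.
 */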
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
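/* Publish the driver's event-ring index to firmware by writing it into the
 * USTORM EQE consumer mirror in internal RAM.
 */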
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

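/* Slowpath status-block callback for the event ring: consume entries up to
 * the firmware consumer snapshot, dispatching each one either to the async
 * handler above or to qed_spq_completion(), then publish the new index back
 * to firmware.
 */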
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (not one of the chain's unusable page-boundary elements),
	 * so the chain macros below operate on a valid index.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

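/* Allocate the event queue: the qed_eq struct itself, a PBL-backed chain of
 * num_elem event-ring elements, and a slot on the slowpath status block
 * through which firmware signals EQ updates.
 */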
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
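/* (Re)initialize an already-allocated SPQ: reset the bookkeeping lists and
 * statistics, link each pre-allocated entry's ramrod buffer into the free
 * pool, then acquire a CORE connection CID and program its context.
 */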
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	unsigned int i = 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

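/* Allocate the SPQ: the qed_spq struct, a single-page chain of slow-path
 * elements, and one coherent DMA buffer holding a qed_spq_entry (ramrod
 * data included) for every chain element; the two are tied together later
 * in qed_spq_setup().
 */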
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

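/* Hand out an SPQ entry. If the free pool is exhausted, a new entry is
 * allocated atomically and destined for the unlimited_pending queue, where
 * it will wait until a ring element becomes available.
 */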
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
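/* Post entries from the given pending list into the HW ring until either
 * the list empties or only keep_reserve ring elements remain; the reserve
 * keeps room available for high-priority ramrods.
 */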
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

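/* Move as many unlimited-pending entries as the free pool allows onto the
 * pending list, then flush the pending list to hardware while honoring the
 * default high-priority reserve.
 */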
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

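/* Post a previously acquired and filled SPQ entry. A rough usage sketch
 * (the qed_sp_* wrappers are the real callers of this API):
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc)
 *		return rc;
 *	... fill p_ent->elem.hdr, ramrod data, comp_mode and priority ...
 *	rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 *
 * For QED_SPQ_MODE_EBLOCK entries this function also blocks until the
 * ramrod completes and returns the entry to the free pool by itself.
 */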
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in EBLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

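/* Match an EQ completion against the completion_pending list by its echo
 * value. Ring elements are returned in order - out-of-order completions are
 * parked in the completion bitmap first - then the entry's callback is
 * invoked and any ramrods waiting for ring space are posted.
 */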
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
		   found->comp_cb.function, found->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for freeing its own entry */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

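/* Allocate the consolidation queue - a PBL chain whose base address is
 * handed to firmware in qed_spq_hw_initialize(); the driver only allocates
 * and resets it.
 */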
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain; 0x80 is the element size */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}