/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
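
/* Polling budget: the delay variant spins for up to
 * SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = ~100us in total,
 * while the sleep variant waits for up to
 * SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = ~5 seconds.
 */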

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

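/* Poll for the completion flag that qed_spq_blocking_cb() sets from EQ
 * context. Returns 0 once the completion is observed, or -EBUSY if the
 * polling budget is exhausted.
 */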
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (READ_ONCE(comp_done->done) == 1) {
			/* Read updated FW return value */
			smp_read_barrier_depends();
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

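/* Escalation ladder for blocking ramrods: a short busy-wait poll
 * (unless skipped), then a sleeping poll, and as a last resort an MCP
 * drain request followed by one more sleeping poll.
 */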
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		goto out;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1)
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
out:
	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err:
	qed_ptt_release(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
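/* Program the XSTORM parts of the SPQ's CORE connection context: set
 * the DQ CF enable/active bits, select the LB physical queue and point
 * the context at the SPQ chain and ConsQ base addresses.
 */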
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

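/* Copy a single SPQ element into the chain and ring the XCM doorbell.
 * The producer index sampled before qed_chain_produce() is written to
 * the element's 'echo' field, which qed_spq_completion() later matches
 * against.
 */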
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

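/* Each protocol may register a single async-event callback in the SPQ's
 * per-protocol table; e.g. the iSCSI and RoCE code register their
 * handlers around function start and unregister them on teardown.
 */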
int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

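/* EQ slowpath handler, registered on the slowpath status block via
 * qed_int_register_cb(). Consumes entries up to the FW consumer
 * snapshot and dispatches each either to the per-protocol async
 * callbacks or to qed_spq_completion().
 */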
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
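/* (Re)initialize the SPQ: reset the bookkeeping lists, carve the
 * pre-allocated entries into the free pool (recording each entry's
 * ramrod-data DMA address), acquire the CORE CID and program the HW
 * context.
 */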
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

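/* Hand out an SPQ entry, preferring the pre-allocated free pool; when
 * the pool is exhausted, fall back to a GFP_ATOMIC allocation destined
 * for the unlimited_pending queue.
 *
 * Typical ramrod flow (a sketch; qed_sp_init_request() in
 * qed_sp_commands.c wraps the first step):
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	... fill p_ent->elem.hdr and the ramrod data ...
 *	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
 */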
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be called with the SPQ lock held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

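/* Post a ramrod: fill in the completion callback, queue the entry and
 * flush the pending list to HW, keeping SPQ_HIGH_PRI_RESERVE_DEFAULT
 * ring elements in reserve for high-priority entries. For EBLOCK
 * entries, also block until the completion arrives (or the
 * qed_spq_block() escalation gives up).
 */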
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

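/* EQ-driven completion of a previously posted ramrod. The EQE's 'echo'
 * is matched against the completion_pending list; the completion bitmap
 * records out-of-order completions so the chain consumer is only
 * advanced across in-order runs.
 */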
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

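/* The ConsQ chain is allocated with a raw 0x80-byte element size; this
 * is assumed to match the firmware interface definition, since no C
 * struct is used for the element here.
 */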
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}