/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

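/* Number of SPQ ring elements left unused when draining the normal pending
 * list, so that room remains for high-priority ramrods; see
 * qed_spq_post_list() / qed_spq_pend_post().
 */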
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

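/* Completion polling budgets: the busy-wait flavour polls for up to
 * 10 * 10us = 100us; the sleeping flavour for at least 1000 * 5ms = 5s.
 */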
#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on the waiting thread;
	 * pairs with the smp_load_acquire() in __qed_spq_block().
	 */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

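/* Wait for an EBLOCK/BLOCK ramrod to complete: optionally a short busy-wait
 * first, then polling with sleeps, and as a last resort an MCP drain request
 * followed by one more round of sleeping polls.
 */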
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}
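
/* Usage sketch (illustrative only; the real handlers live in the protocol
 * files, e.g. qed_iscsi.c / qed_rdma.c, and must match the
 * qed_spq_async_comp_cb typedef):
 *
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI, my_async_cb);
 *	...
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 *
 * where my_async_cb is a hypothetical handler name used here for
 * illustration.
 */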

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
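/* For example, with comp_bitmap_idx == 0, if the completion for echo 1
 * arrives before the one for echo 0, the first call only marks bit 1 and
 * returns nothing to the chain; the later call for echo 0 then returns both
 * produced elements and advances comp_bitmap_idx to 2.
 */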
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
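
/* Caller-side sketch (illustrative only; real callers normally go through
 * the qed_sp_init_request() helpers in qed_sp_commands.c rather than driving
 * the SPQ directly):
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (!rc) {
 *		... fill p_ent->elem.hdr, ramrod data, comp_mode, priority ...
 *		rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 */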

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}