xref: /openbmc/linux/drivers/scsi/csiostor/csio_wr.c (revision ae213c44)
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <linux/cache.h>

#include "t4_values.h"
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_defs.h"
int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */

int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;

#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)				\
	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)

static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
							reg * sizeof(uint32_t));
}

/* Free list buffer size */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
	return sge->sge_fl_buf_size[buf->paddr & 0xF];
}

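/*
 * Descriptor format note: when the freelist is populated (see
 * csio_wr_fill_fl() below), the buffer-size register index @sreg is
 * encoded into the low 4 bits of the 64-bit buffer address written to
 * the freelist ring. DMA buffer addresses are aligned well beyond 16
 * bytes, so the two fields cannot collide, and csio_wr_fl_bufsz() can
 * recover the size-register index by masking with 0xF.
 */
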
/* Size of the egress queue status page */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}

/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	/*
	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
	 * bytes' worth of entries in the freelist queue. This translates
	 * to at least 8 freelist buffer pointers (since each pointer is
	 * 8 bytes).
	 */
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
				  PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
				  MYPF_REG(SGE_PF_KDOORBELL_A));
		flq->inc_idx &= 7;
	}
}

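/*
 * Worked example of the credit math above: one SGE queue credit is
 * CSIO_QCREDIT_SZ (64) bytes, i.e. 8 freelist pointers of 8 bytes each.
 * With inc_idx == 21 pending entries, PIDX advances by 21 / 8 = 2
 * credits and the remaining 21 & 7 = 5 entries stay pending until the
 * next doorbell ring.
 */
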
/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	csio_wr_reg32(hw, CIDXINC_V(0)		|
			  INGRESSQID_V(iqid)	|
			  TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
			  MYPF_REG(SGE_PF_GTS_A));
}

/*
 * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Fill up freelist buffer entries with buffers of size specified
 * in the size register.
 */
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	__be64 *d = (__be64 *)(flq->vstart);
	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
	uint64_t paddr;
	int sreg = flq->un.fl.sreg;
	int n = flq->credits;

	while (n--) {
		buf->len = sge->sge_fl_buf_size[sreg];
		buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
						&buf->paddr, GFP_KERNEL);
		if (!buf->vaddr) {
			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
			return -ENOMEM;
		}

		paddr = buf->paddr | (sreg & 0xF);

		*d++ = cpu_to_be64(paddr);
		buf++;
	}

	return 0;
}

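/*
 * Note: on a partial fill the buffers allocated so far are not unwound
 * here; each still has a valid vaddr and is released later by
 * csio_wrm_exit(), which frees every FL buffer whose vaddr is non-NULL.
 */
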
/*
 * csio_wr_update_fl - Advance the freelist producer index.
 * @hw: HW module.
 * @flq: Freelist queue.
 * @n: Number of buffer entries refilled.
 *
 * Account for @n newly refilled entries: bump the pending doorbell
 * count and the producer index, wrapping the latter at the queue size.
 */
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
	flq->inc_idx += n;
	flq->pidx += n;
	if (unlikely(flq->pidx >= flq->credits))
		flq->pidx -= (uint16_t)flq->credits;

	CSIO_INC_STATS(flq, n_flq_refill);
}

/*
 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
 * @hw: HW module
 * @qsize: Size of the queue in bytes
 * @wrsize: Size of WRs in this queue, if fixed.
 * @type: Type of queue (Ingress/Egress/Freelist)
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers for FL.
 * @sreg: What is the FL buffer size register?
 * @iq_int_handler: Ingress queue handler in INTx mode.
 *
 * This function allocates and sets up a queue for the caller
 * of size qsize, aligned at the required boundary. This is subject to
 * free entries being available in the queue array. If one is found,
 * it is initialized with the allocated queue, marked as being used (owner),
 * and a handle returned to the caller in the form of the queue's index
 * into the q_arr array.
 * If the user has indicated a freelist (by specifying nflb > 0), create
 * another queue (with its own index into q_arr) for the freelist. Allocate
 * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
 * idx in the ingress queue's flq.idx. This is how a Freelist is associated
 * with its owning ingress queue.
 */
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q	*q, *flq;
	int		free_idx = wrm->free_qidx;
	int		ret_idx = free_idx;
	uint32_t	qsz;
	int flq_idx;

	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}

	switch (type) {
	case CSIO_EGRESS:
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				    wrsize);
			return -1;
		}

		/*
		 * Number of elements must be a multiple of 16
		 * So this includes status page size
		 */
		qsz = ALIGN(qsize/wrsize, 16) * wrsize;

		break;
	case CSIO_FREELIST:
		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}

	q = wrm->q_arr[free_idx];

	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
				       GFP_KERNEL);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}

	q->type		= type;
	q->owner	= owner;
	q->pidx		= q->cidx = q->inc_idx = 0;
	q->size		= qsz;
	q->wr_sz	= wrsize;	/* If using fixed size WRs */

	wrm->free_qidx++;

	if (type == CSIO_INGRESS) {
		/* Since queue area is set to zero */
		q->un.iq.genbit	= 1;

		/*
		 * Ingress queue status page size is always the size of
		 * the ingress queue entry.
		 */
		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);

		/* Allocate memory for FL if requested */
		if (nflb > 0) {
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			/* Associate the new FL with the Ingress queue */
			q->un.iq.flq_idx = flq_idx;

			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kcalloc(flq->credits,
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;

			/* Fill up the free list buffers */
			if (csio_wr_fill_fl(hw, flq))
				return -1;

			/*
			 * Make sure that in a FLQ, at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}

		/* Associate the IQ INTx handler. */
		q->un.iq.iq_intx_handler = iq_intx_handler;

		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;

	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else { /* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}

	return ret_idx;
}

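/*
 * Illustrative (hypothetical) caller of csio_wr_alloc_q(), sketching
 * how an ingress queue with an attached freelist might be set up --
 * the size constants are made-up names for the example:
 *
 *	iq_idx = csio_wr_alloc_q(hw, EXAMPLE_IQ_SIZE, 64, CSIO_INGRESS,
 *				 owner, EXAMPLE_NUM_FLBUFS, 0,
 *				 my_intx_handler);
 *	if (iq_idx == -1)
 *		return -ENOMEM;
 *
 * The returned index is later passed to csio_wr_iq_create() to commit
 * the queue to firmware; the freelist allocated on its behalf is
 * reachable through the IQ's un.iq.flq_idx.
 */
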
/*
 * csio_wr_iq_create_rsp - Response handler for IQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that got created.
 *
 * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx)	= 0;

	/* Actual iq-id. */
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;

	/* Set the iq-id to iq map table. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);

	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		csio_q_pidx(hw, flq_idx)    = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: MSIX vector.
 * @portid: PCIe channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * with alloc/write bits set.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}

	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst	= X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex	=
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst		= X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex	= (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Pass in the ingress queue cmd parameters */
	iqp.pfn			= hw->pfn;
	iqp.vfn			= 0;
	iqp.iq_start		= 1;
	iqp.viid		= 0;
	iqp.type		= FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch		= async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus	= X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus	= X_UPDATESCHEDULING_TIMER;
	iqp.iqanud		= X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech		= portid;
	iqp.iqintcntthresh	= (uint8_t)csio_sge_thresh_reg;

	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	iqp.iqsize		= csio_q_size(hw, iq_idx) /
						csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr		= csio_q_pstart(hw, iq_idx);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0paden	= 1;
		iqp.fl0packen	= flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin	= X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax	= ((chip == CHELSIO_T5) ?
				  X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
		iqp.fl0size	= csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr	= csio_q_pstart(hw, flq_idx);
	}

	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}

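/*
 * Note on the cbfn convention used above (and mirrored by the EQ and
 * destroy paths below): a NULL cbfn makes the call synchronous -- the
 * mailbox completion is consumed inline by the *_rsp() handler, which
 * also returns the mailbox to hw->mb_mempool. With a non-NULL cbfn the
 * call returns 0 right after issue, and completion handling moves to
 * the caller's callback path.
 */
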
/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_eqid(hw, eq_idx)	= (uint16_t)eqp.eqid;
	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
	csio_q_pidx(hw, eq_idx)	= csio_q_cidx(hw, eq_idx) = 0;
	csio_q_inc_idx(hw, eq_idx) = 0;

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated ingress queue index.
 * @portid: PCIe channel to be associated with this queue.
 * @cbfn: Completion callback.
 *
 * This API configures an offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}

	eqp.pfn			= hw->pfn;
	eqp.vfn			= 0;
	eqp.eqstart		= 1;
	eqp.hostfcmode		= X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid		= csio_q_iqid(hw, iq_idx);
	eqp.fbmin		= X_FETCHBURSTMIN_64B;
	eqp.fbmax		= X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh		= 0;
	eqp.pciechn		= portid;
	eqp.eqsize		= csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr		= csio_q_pstart(hw, eq_idx);

	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.pfn		= hw->pfn;
	iqp.vfn		= 0;
	iqp.iqid	= csio_q_iqid(hw, iq_idx);
	iqp.type	= FW_IQ_TYPE_FL_INT_CAP;

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}

/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that was freed.
 *
 * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_eq_destroy - Free an Egress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @eq_idx: Egress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
 * with the free bit set.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb  *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn		= hw->pfn;
	eqp.vfn		= 0;
	eqp.eqid	= csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
 * @hw: HW module
 * @qidx: Egress queue index
 *
 * Cleanup the Egress queue status page.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q	*q = csio_hw_to_wrm(hw)->q_arr[qidx];
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;

	memset(stp, 0, sizeof(*stp));
}

/*
 * csio_wr_cleanup_iq_ftr - Cleanup footer entries in IQ
 * @hw: HW module
 * @qidx: Ingress queue index
 *
 * Cleanup the footer entries in the given ingress queue and set the
 * internal copy of the genbit to 1.
 */
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_q	*q	= wrm->q_arr[qidx];
	void *wr;
	struct csio_iqwr_footer *ftr;
	uint32_t i = 0;

	/* Set to 1 since we are just about to zero out the genbit */
	q->un.iq.genbit = 1;

	for (i = 0; i < q->credits; i++) {
		/* Get the WR */
		wr = (void *)((uintptr_t)q->vstart +
					   (i * q->wr_sz));
		/* Get the footer */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));
		/* Zero out footer */
		memset(ftr, 0, sizeof(*ftr));
	}
}

int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			/* fall through */
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
			}
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return 0;
}

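/*
 * Note: once any firmware destroy command above returns -EBUSY or
 * -ETIMEDOUT, "cmd" is latched to false and the remaining queues are
 * torn down on the host side only; no further mailbox traffic is
 * attempted for them.
 */
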
/*
 * csio_wr_get - Get requested size of WR entry/entries from queue.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @size: Cumulative size of Work request(s).
 * @wrp: Work request pair.
 *
 * If the requested credits are available, return the start address of the
 * work request in the work request pair. Set pidx accordingly and
 * return.
 *
 * NOTE about WR pair:
 * ==================
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/size to be passed back to the caller -
 * hence the Work request pair format.
 */
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
						(q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz	= ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits	= req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);

	/* Calculate credits */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Check if we have enough credits.
	 * credits = 1 implies queue is full.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}

	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if WR spills over.
	 * If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx	= (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
							CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx	+= (uint16_t)req_credits;

		/* We are at the end of the queue; roll pidx back to the top */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}

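/*
 * Worked example of the credit check above (illustrative numbers): with
 * q->credits = 1024, pidx = 1020 and cidx = 4, the available credits
 * are 1024 - (1020 - 4) - 1 = 7. A 512-byte request needs 512 / 64 = 8
 * credits and would therefore fail with -EBUSY. One credit is always
 * left unused so that pidx == cidx unambiguously means "empty".
 */
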
/*
 * csio_wr_copy_to_wrp - Copies given data into WR.
 * @data_buf: Data buffer.
 * @wrp: Work request pair.
 * @wr_off: Work request offset.
 * @data_len: Data length.
 *
 * Copies the given data into the Work Request. The work request pair
 * (wrp) specifies the address information of the work request.
 * Returns: none
 */
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		   uint32_t wr_off, uint32_t data_len)
{
	uint32_t nbytes;

	/* Amount of space available in buffer addr1 of WRP */
	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
					data_len : (wrp->size1 - wr_off);

	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
	data_len -= nbytes;

	/* Write the remaining data from the beginning of the circular buffer */
	if (data_len) {
		CSIO_DB_ASSERT(data_len <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
	}
}

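/*
 * Illustrative (hypothetical) send path tying csio_wr_get(),
 * csio_wr_copy_to_wrp() and csio_wr_issue() together -- "wr_buf" and
 * "wr_len" are made-up names for a caller-built work request:
 *
 *	struct csio_wr_pair wrp;
 *
 *	if (csio_wr_get(hw, eq_idx, wr_len, &wrp))
 *		return -EBUSY;
 *	csio_wr_copy_to_wrp(wr_buf, &wrp, 0, wr_len);
 *	csio_wr_issue(hw, eq_idx, false);
 *
 * The pair abstraction lets the copy transparently straddle the
 * circular queue's wrap point.
 */
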
/*
 * csio_wr_issue - Notify chip of Work request.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @prio: 0: Low priority, 1: High priority
 *
 * Rings the SGE Doorbell by writing the current producer index of the
 * passed-in queue into the register.
 */
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	wmb();
	/* Ring SGE Doorbell writing q->pidx into it */
	csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
			  PIDX_T5_V(q->inc_idx) | DBTYPE_F,
			  MYPF_REG(SGE_PF_KDOORBELL_A));
	q->inc_idx = 0;

	return 0;
}

static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
	if (q->pidx > q->cidx)
		return q->pidx - q->cidx;
	else if (q->cidx > q->pidx)
		return q->credits - (q->cidx - q->pidx);
	else
		return 0;	/* cidx == pidx, empty queue */
}

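/*
 * Example: with credits = 1024, pidx = 10 and cidx = 1000 the queue has
 * wrapped, so csio_wr_avail_qcredits() reports 1024 - (1000 - 10) = 34
 * outstanding entries.
 */
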
/*
 * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
 * @hw: HW module.
 * @flq: The freelist queue.
 *
 * Invalidate the driver's version of a freelist buffer entry,
 * without freeing the associated DMA memory. The entry
 * to be invalidated is picked up from the current Free list
 * queue cidx.
 */
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
	flq->cidx++;
	if (flq->cidx == flq->credits) {
		flq->cidx = 0;
		CSIO_INC_STATS(flq, n_qwrap);
	}
}

/*
 * csio_wr_process_fl - Process a freelist completion.
 * @hw: HW module.
 * @q: The ingress queue attached to the Freelist.
 * @wr: The freelist completion WR in the ingress queue.
 * @len_to_qid: The lower 32-bits of the first flit of the RSP footer.
 * @iq_handler: Caller's handler for this completion.
 * @priv: Private pointer of caller.
 */
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);

	len = len_to_qid;

	if (len & IQWRF_NEWBUF) {
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);

	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr	= buf->paddr;
		fbuf->vaddr	= buf->vaddr;

		flb.offset	= flq->un.fl.offset;
		lastlen		= min(bufsz, len);
		fbuf->len	= lastlen;

		len -= lastlen;
		if (!len)
			break;
		csio_wr_inval_flq_buf(hw, flq);
	}

	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	if (flq->un.fl.packen)
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);
}

/*
 * csio_is_new_iqwr - Is this a new Ingress queue entry?
 * @q: Ingress queue.
 * @ftr: Ingress queue WR SGE footer.
 *
 * The entry is new if our generation bit matches the corresponding
 * bit in the footer of the current WR.
 */
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
}

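/*
 * The generation-bit protocol, in brief: the driver starts with
 * genbit = 1 over a zeroed queue, so the first pass of hardware-written
 * footers (gen = 1) compares as "new". Each time the driver wraps it
 * toggles its copy (see csio_wr_process_iq() below), so entries left
 * over from the previous pass always carry the opposite bit and are
 * never reprocessed.
 */
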
/*
 * csio_wr_process_iq - Process elements in Ingress queue.
 * @hw: HW pointer
 * @q: Ingress queue
 * @iq_handler: Handler for this queue
 * @priv: Caller's private pointer
 *
 * This routine walks through every entry of the ingress queue, calling
 * the provided iq_handler with the entry, until the generation bit
 * flips.
 */
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
					wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;

	/* Get the footer */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));

	/*
	 * When q wrapped around last time, the driver should have inverted
	 * iq.genbit as well.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
						(uintptr_t)q->vwrap);
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);

		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
					csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * We are already in the forward interrupt
				 * queue service; do not service it again!
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				 wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}

		/*
		 * Ingress *always* has fixed size WR entries. Therefore,
		 * there should always be complete WRs towards the end of
		 * queue.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {

			/* Roll over to start of queue */
			q->cidx = 0;
			wr	= q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr	= (void *)((uintptr_t)(q->vstart) +
					   (q->cidx * q->wr_sz));
		}

		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;

	} /* while (csio_is_new_iqwr(q, ftr)) */

	/*
	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
	 * especially in MSI-X mode. With INTx, this may be a common
	 * occurrence.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}

	/* Replenish free list buffers if pending falls below low water mark */
	if (flq) {
		uint32_t avail  = csio_wr_avail_qcredits(flq);

		if (avail <= 16) {
			/*
			 * Make sure that in a FLQ at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}

restart:
	/* Now inform SGE about our incremental index value */
	csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)		|
			  INGRESSQID_V(q->un.iq.physiqid)	|
			  TIMERREG_V(csio_sge_timer_reg),
			  MYPF_REG(SGE_PF_GTS_A));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}

int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_q	*iq	= wrm->q_arr[qidx];

	return csio_wr_process_iq(hw, iq, iq_handler, priv);
}

static int
csio_closest_timer(struct csio_sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = cnt - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

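/*
 * Example: if timer_val[] held {1, 5, 10, 50, 100, 200} (microseconds,
 * hypothetical values) and the module parameter asked for 12us,
 * csio_closest_timer() would return index 2, since |12 - 10| = 2 is the
 * smallest delta.
 */
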
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t stat_len = clsz > 64 ? 128 : 64;
	u32 fl_align = clsz < 32 ? 32 : clsz;
	u32 pack_align;
	u32 ingpad, ingpack;
	int pcie_cap;

	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
		      HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
		      HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
		      SGE_HOST_PAGE_SIZE_A);

	/* T5 introduced the separation of the Free List Padding and
	 * Packing Boundaries.  Thus, we can select a smaller Padding
	 * Boundary to avoid uselessly chewing up PCIe Link and Memory
	 * Bandwidth, and use a Packing Boundary which is large enough
	 * to avoid false sharing between CPUs, etc.
	 *
	 * For the PCI Link, the smaller the Padding Boundary the
	 * better.  For the Memory Controller, a smaller Padding
	 * Boundary is better until we cross under the Memory Line
	 * Size (the minimum unit of transfer to/from Memory).  If we
	 * have a Padding Boundary which is smaller than the Memory
	 * Line Size, that'll involve a Read-Modify-Write cycle on the
	 * Memory Controller which is never good.
	 */

	/* We want the Packing Boundary to be based on the Cache Line
	 * Size in order to help avoid False Sharing performance
	 * issues between CPUs, etc.  We also want the Packing
	 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
	 * get best performance when the Packing Boundary is a
	 * multiple of the Maximum Payload Size.
	 */
	pack_align = fl_align;
	pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u32 mps, mps_log;
		u16 devctl;

		/* The PCIe Device Control Maximum Payload Size field
		 * [bits 7:5] encodes sizes as powers of 2 starting at
		 * 128 bytes.
		 */
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL,
				     &devctl);
		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
		mps = 1 << mps_log;
		if (mps > pack_align)
			pack_align = mps;
	}

	/* T5/T6 have a special interpretation of the "0"
	 * value for the Packing Boundary.  This corresponds to 16
	 * bytes instead of the expected 32 bytes.
	 */
	if (pack_align <= 16) {
		ingpack = INGPACKBOUNDARY_16B_X;
		fl_align = 16;
	} else if (pack_align == 32) {
		ingpack = INGPACKBOUNDARY_64B_X;
		fl_align = 64;
	} else {
		u32 pack_align_log = fls(pack_align) - 1;

		ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
		fl_align = pack_align;
	}

	/* Use the smallest Ingress Padding which isn't smaller than
	 * the Memory Controller Read/Write Size.  We'll take that as
	 * being 8 bytes since we don't know of any system with a
	 * wider Memory Controller Bus Width.
	 */
	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		ingpad = INGPADBOUNDARY_32B_X;
	else
		ingpad = T6_INGPADBOUNDARY_8B_X;

	csio_set_reg_field(hw, SGE_CONTROL_A,
			   INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
			   EGRSTATUSPAGESIZE_F,
			   INGPADBOUNDARY_V(ingpad) |
			   EGRSTATUSPAGESIZE_V(stat_len != 64));
	csio_set_reg_field(hw, SGE_CONTROL2_A,
			   INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
			   INGPACKBOUNDARY_V(ingpack));

	/* FL BUFFER SIZE#0 is the page size, i.e. already cache-line aligned */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);

	/*
	 * If using hard params, the following will get set correctly
	 * in csio_wr_set_sge().
	 */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
		csio_wr_reg32(hw,
			(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
			fl_align - 1) & ~(fl_align - 1),
			SGE_FL_BUFFER_SIZE2_A);
		csio_wr_reg32(hw,
			(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
			fl_align - 1) & ~(fl_align - 1),
			SGE_FL_BUFFER_SIZE3_A);
	}

	sge->csio_fl_align = fl_align;

	csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);

	/* Default value of rx_dma_offset used by the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL_A,
			   PKTSHIFT_V(PKTSHIFT_M),
			   PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));

	csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
				    CSUM_HAS_PSEUDO_HDR_F, 0);
}

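/*
 * Worked example of the MPS decode above: a Device Control value with
 * bits [7:5] = 2 gives mps_log = 2 + 7 = 9, i.e. a 512-byte Maximum
 * Payload Size. On a machine with 64-byte cache lines this raises
 * pack_align from 64 to 512, so the Packing Boundary becomes a multiple
 * of the payload size as the comment above recommends.
 */
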
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}

/*
 * csio_wr_get_sge - Get SGE register values.
 * @hw: HW module.
 *
 * Used by non-master functions and by master functions relying on the
 * config file.
 */
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

	ingpad = INGPADBOUNDARY_G(sge->sge_control);

	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}

	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);

	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_G(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_G(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_G(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_G(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_G(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
	sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}

/*
 * csio_wr_set_sge - Initialize SGE registers
 * @hw: HW module.
 *
 * Used by the Master function to initialize SGE registers in the
 * absence of a config file.
 */
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	int i;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
			   LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
			   HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));

	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
			   ENABLE_DROP_F);

	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */

	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);

	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Initialize interrupt coalescing attributes */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

	csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
		      THRESHOLD_1_V(sge->counter_val[1]) |
		      THRESHOLD_2_V(sge->counter_val[2]) |
		      THRESHOLD_3_V(sge->counter_val[3]),
		      SGE_INGRESS_RX_THRESHOLD_A);

	csio_wr_reg32(hw,
		   TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		   TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		   SGE_TIMER_VALUE_0_AND_1_A);

	csio_wr_reg32(hw,
		   TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		   TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		   SGE_TIMER_VALUE_2_AND_3_A);

	csio_wr_reg32(hw,
		   TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		   TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		   SGE_TIMER_VALUE_4_AND_5_A);

	csio_init_intr_coalesce_parms(hw);
}

void
csio_wr_sge_init(struct csio_hw *hw)
{
	/*
	 * If we are master and the chip is not initialized:
	 *    - If we plan to use the config file, we need to fix up some
	 *      host specific registers, and read the rest of the SGE
	 *      configuration.
	 *    - If we don't plan to use the config file, we need to initialize
	 *      the SGE entirely, including fixing the host specific registers.
	 * If we are master and the chip is initialized, just read and work
	 *      off of the already initialized SGE values.
	 * If we aren't the master, we are only allowed to read and work off
	 *      of the already initialized SGE values.
	 *
	 * Therefore, before calling this function, we assume that the
	 * mastership of the card, its state, and whether to use the config
	 * file or not, have already been decided.
	 */
	if (csio_is_hw_master(hw)) {
		if (hw->fw_state != CSIO_DEV_STATE_INIT)
			csio_wr_fixup_host_params(hw);

		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
			csio_wr_get_sge(hw);
		else
			csio_wr_set_sge(hw);
	} else
		csio_wr_get_sge(hw);
}

/*
 * csio_wrm_init - Initialize Work request module.
 * @wrm: WR module
 * @hw: HW pointer
 *
 * Allocates memory for an array of queue pointers starting at q_arr.
 */
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;

	if (!wrm->num_q) {
		csio_err(hw, "Num queues is not set\n");
		return -EINVAL;
	}

	wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
	if (!wrm->q_arr)
		goto err;

	for (i = 0; i < wrm->num_q; i++) {
		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
		if (!wrm->q_arr[i]) {
			while (--i >= 0)
				kfree(wrm->q_arr[i]);
			goto err_free_arr;
		}
	}
	wrm->free_qidx	= 0;

	return 0;

err_free_arr:
	kfree(wrm->q_arr);
err:
	return -ENOMEM;
}

/*
 * csio_wrm_exit - Uninitialize Work request module.
 * @wrm: WR module
 * @hw: HW module
 *
 * Uninitialize the WR module. Free q_arr and the queue pointers in it.
 * We have the additional job of freeing the DMA memory associated
 * with the queues.
 */
void
csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;
	uint32_t j;
	struct csio_q *q;
	struct csio_dma_buf *buf;

	for (i = 0; i < wrm->num_q; i++) {
		q = wrm->q_arr[i];

		if (wrm->free_qidx && (i < wrm->free_qidx)) {
			if (q->type == CSIO_FREELIST) {
				if (!q->un.fl.bufs)
					continue;
				for (j = 0; j < q->credits; j++) {
					buf = &q->un.fl.bufs[j];
					if (!buf->vaddr)
						continue;
					dma_free_coherent(&hw->pdev->dev,
							buf->len, buf->vaddr,
							buf->paddr);
				}
				kfree(q->un.fl.bufs);
			}
			dma_free_coherent(&hw->pdev->dev, q->size,
					q->vstart, q->pstart);
		}
		kfree(q);
	}

	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;

	kfree(wrm->q_arr);
}