xref: /openbmc/linux/drivers/scsi/csiostor/csio_wr.c (revision a3667aaed5698b84bad2f1b3f71adc86499f4bc6)
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <linux/cache.h>

#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_defs.h"

int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */

int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;

#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)				\
	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)

static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
							reg * sizeof(uint32_t));
}

/*
 * Free list buffer size: the buffer-size register index is carried in the
 * low 4 bits of the buffer's DMA address (see the note after
 * csio_wr_fill_fl() below).
 */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
	return sge->sge_fl_buf_size[buf->paddr & 0xF];
}

/* Size of the egress queue status page */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
}

/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	/*
	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
	 * number of bytes in the freelist queue. This translates to at least
	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
	 */
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
			      PIDX(flq->inc_idx / 8),
			      MYPF_REG(SGE_PF_KDOORBELL));
		flq->inc_idx &= 7;
	}
}

/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	csio_wr_reg32(hw, CIDXINC(0)		|
			  INGRESSQID(iqid)	|
			  TIMERREG(X_TIMERREG_RESTART_COUNTER),
			  MYPF_REG(SGE_PF_GTS));
}

/*
 * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Fill up freelist buffer entries with buffers of the size specified
 * in the size register.
 */
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	__be64 *d = (__be64 *)(flq->vstart);
	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
	uint64_t paddr;
	int sreg = flq->un.fl.sreg;
	int n = flq->credits;

	while (n--) {
		buf->len = sge->sge_fl_buf_size[sreg];
		buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
						  &buf->paddr);
		if (!buf->vaddr) {
			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
			return -ENOMEM;
		}

		/* Encode the size register index in the low 4 address bits */
		paddr = buf->paddr | (sreg & 0xF);

		*d++ = cpu_to_be64(paddr);
		buf++;
	}

	return 0;
}
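
/*
 * Illustration (not part of the driver): each free-list descriptor packs
 * the buffer's bus address and its size-register index into one 64-bit
 * word. The low 4 address bits are assumed to be zero (buffers are at
 * least 16-byte aligned), so the encode in csio_wr_fill_fl() and the
 * decode in csio_wr_fl_bufsz() round-trip cleanly:
 *
 *	paddr = buf->paddr | (sreg & 0xF);	// encode, on refill
 *	sreg = buf->paddr & 0xF;		// decode, on completion
 *	len = sge->sge_fl_buf_size[sreg];	// buffer size for this sreg
 */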

/*
 * csio_wr_update_fl - Update a freelist queue's indices after a refill.
 * @hw: HW module.
 * @flq: Freelist queue.
 * @n: Number of buffers refilled.
 *
 * Advance the producer and doorbell-increment indices by @n, wrapping
 * pidx around the end of the queue if needed.
 */
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
	flq->inc_idx += n;
	flq->pidx += n;
	if (unlikely(flq->pidx >= flq->credits))
		flq->pidx -= (uint16_t)flq->credits;

	CSIO_INC_STATS(flq, n_flq_refill);
}

/*
 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
 * @hw: HW module
 * @qsize: Size of the queue in bytes
 * @wrsize: Size of WR in this queue, if fixed.
 * @type: Type of queue (Ingress/Egress/Freelist)
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers for FL.
 * @sreg: What is the FL buffer size register?
 * @iq_intx_handler: Ingress queue handler in INTx mode.
 *
 * This function allocates and sets up a queue for the caller
 * of size qsize, aligned at the required boundary. This is subject to
 * free entries being available in the queue array. If one is found,
 * it is initialized with the allocated queue, marked as being used (owner),
 * and a handle returned to the caller in the form of the queue's index
 * into the q_arr array.
 * If the user has indicated a freelist (by specifying nflb > 0), create
 * another queue (with its own index into q_arr) for the freelist. Allocate
 * memory for the DMA buffer metadata (vaddr, len etc). Save off the freelist
 * idx in the ingress queue's flq_idx. This is how a freelist is associated
 * with its owning ingress queue.
 */
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q	*q, *flq;
	int		free_idx = wrm->free_qidx;
	int		ret_idx = free_idx;
	uint32_t	qsz;
	int flq_idx;

	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}

	switch (type) {
	case CSIO_EGRESS:
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				    wrsize);
			return -1;
		}

		/*
		 * Number of elements must be a multiple of 16,
		 * so this includes the status page size.
		 */
		qsz = ALIGN(qsize/wrsize, 16) * wrsize;

		break;
	case CSIO_FREELIST:
		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}

	q = wrm->q_arr[free_idx];

	q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}

	/*
	 * We need to zero out the contents, importantly for ingress,
	 * since we start with a generation bit of 1 for ingress.
	 */
	memset(q->vstart, 0, qsz);

	q->type		= type;
	q->owner	= owner;
	q->pidx		= q->cidx = q->inc_idx = 0;
	q->size		= qsz;
	q->wr_sz	= wrsize;	/* If using fixed size WRs */

	wrm->free_qidx++;

	if (type == CSIO_INGRESS) {
		/* Since queue area is set to zero */
		q->un.iq.genbit	= 1;

		/*
		 * Ingress queue status page size is always the size of
		 * the ingress queue entry.
		 */
		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);

		/* Allocate memory for FL if requested */
		if (nflb > 0) {
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			/* Associate the new FL with the ingress queue */
			q->un.iq.flq_idx = flq_idx;

			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kzalloc(flq->credits *
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;

			/* Fill up the free list buffers */
			if (csio_wr_fill_fl(hw, flq))
				return -1;

			/*
			 * Make sure in a FLQ, at least 1 credit (8 FL buffers)
			 * remains unpopulated, otherwise HW thinks
			 * FLQ is empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}

		/* Associate the IQ INTx handler. */
		q->un.iq.iq_intx_handler = iq_intx_handler;

		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;

	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else { /* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}

	return ret_idx;
}
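
/*
 * Usage sketch (hypothetical; the sizes, the sreg value and
 * my_intx_handler are made-up examples, not driver defaults):
 *
 *	int iq_idx;
 *
 *	iq_idx = csio_wr_alloc_q(hw, 1024 * 64, 64, CSIO_INGRESS,
 *				 owner, 1024, 0, my_intx_handler);
 *	if (iq_idx == -1)
 *		return -ENOMEM;
 *
 * The attached freelist occupies its own q_arr slot, reachable via
 * hw->wrm.q_arr[iq_idx]->un.iq.flq_idx.
 */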

/*
 * csio_wr_iq_create_rsp - Response handler for IQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that got created.
 *
 * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx)	= 0;

	/* Actual iq-id. */
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;

	/* Set the iq-id to iq map table. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " rel_iqid:%d iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);

	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		csio_q_pidx(hw, flq_idx)    = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: MSIX vector.
 * @portid: PCIE Channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * with alloc/write bits set.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}

	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst	= X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex	=
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst		= X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex	= (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Pass in the ingress queue cmd parameters */
	iqp.pfn			= hw->pfn;
	iqp.vfn			= 0;
	iqp.iq_start		= 1;
	iqp.viid		= 0;
	iqp.type		= FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch		= async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus	= X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus	= X_UPDATESCHEDULING_TIMER;
	iqp.iqanud		= X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech		= portid;
	iqp.iqintcntthresh	= (uint8_t)csio_sge_thresh_reg;

	/* iqesize encodes the IQ entry size as log2(size) - 4 */
	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	iqp.iqsize		= csio_q_size(hw, iq_idx) /
						csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr		= csio_q_pstart(hw, iq_idx);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0paden	= 1;
		iqp.fl0packen	= flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin	= X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax	= X_FETCHBURSTMAX_512B;
		iqp.fl0size	= csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr	= csio_q_pstart(hw, flq_idx);
	}

	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}

/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_eqid(hw, eq_idx)	= (uint16_t)eqp.eqid;
	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
	csio_q_pidx(hw, eq_idx)	= csio_q_cidx(hw, eq_idx) = 0;
	csio_q_inc_idx(hw, eq_idx) = 0;

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated ingress queue index.
 * @portid: PCIE Channel to be associated with this queue.
 * @cbfn: Completion callback.
 *
 * This API configures an offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}

	eqp.pfn			= hw->pfn;
	eqp.vfn			= 0;
	eqp.eqstart		= 1;
	eqp.hostfcmode		= X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid		= csio_q_iqid(hw, iq_idx);
	eqp.fbmin		= X_FETCHBURSTMIN_64B;
	eqp.fbmax		= X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh		= 0;
	eqp.pciechn		= portid;
	eqp.eqsize		= csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr		= csio_q_pstart(hw, eq_idx);

	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.pfn		= hw->pfn;
	iqp.vfn		= 0;
	iqp.iqid	= csio_q_iqid(hw, iq_idx);
	iqp.type	= FW_IQ_TYPE_FL_INT_CAP;

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}

/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that was freed.
 *
 * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_eq_destroy - Free an Egress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @eq_idx: Egress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
 * with the free bit set.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb  *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn		= hw->pfn;
	eqp.vfn		= 0;
	eqp.eqid	= csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
 * @hw: HW module
 * @qidx: Egress queue index
 *
 * Cleanup the Egress queue status page.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q	*q = csio_hw_to_wrm(hw)->q_arr[qidx];
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;

	memset(stp, 0, sizeof(*stp));
}

/*
 * csio_wr_cleanup_iq_ftr - Cleanup footer entries in an IQ
 * @hw: HW module
 * @qidx: Ingress queue index
 *
 * Cleanup the footer entries in the given ingress queue and
 * set the internal copy of the genbit to 1.
 */
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_q	*q	= wrm->q_arr[qidx];
	void *wr;
	struct csio_iqwr_footer *ftr;
	uint32_t i = 0;

	/* Set to 1 since we are just about to zero out the genbit */
	q->un.iq.genbit = 1;

	for (i = 0; i < q->credits; i++) {
		/* Get the WR */
		wr = (void *)((uintptr_t)q->vstart +
					   (i * q->wr_sz));
		/* Get the footer */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));
		/* Zero out footer */
		memset(ftr, 0, sizeof(*ftr));
	}
}

int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			/* fall through */
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
			}
			/* fall through */
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return 0;
}

/*
 * csio_wr_get - Get requested size of WR entry/entries from queue.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @size: Cumulative size of Work request(s).
 * @wrp: Work request pair.
 *
 * If requested credits are available, return the start address of the
 * work request in the work request pair. Set pidx accordingly and
 * return.
 *
 * NOTE about WR pair:
 * ==================
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/size to be passed back to the caller -
 * hence the Work request pair format.
 */
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
						(q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz	= ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits	= req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);

	/* Calculate credits */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Check if we have enough credits. One credit is kept in reserve
	 * above, so a full queue shows up here as credits == 0.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}

	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if the WR spills
	 * over. If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx	= (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
							CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx	+= (uint16_t)req_credits;

		/* We are at the end of queue, roll back pidx to top of queue */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}
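
/*
 * Worked example (illustrative numbers only, assuming the 64-byte credit
 * size noted earlier): with q->credits = 256 and q->pidx = 254, a 4-credit
 * (256-byte) request spills past q->vwrap, so the pair comes back split:
 *
 *	wrp.addr1 = vstart + 254 * CSIO_QCREDIT_SZ;	(size1 = 128 bytes)
 *	wrp.addr2 = vstart;				(size2 = 128 bytes)
 *	q->pidx = 2;
 */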

/*
 * csio_wr_copy_to_wrp - Copies given data into the WR.
 * @data_buf - Data buffer
 * @wrp - Work request pair.
 * @wr_off - Work request offset.
 * @data_len - Data length.
 *
 * Copies the given data into the Work Request. The work request pair (wrp)
 * specifies the address information of the Work request.
 * Returns: none
 */
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		   uint32_t wr_off, uint32_t data_len)
{
	uint32_t nbytes;

	/* Amount of space available in buffer addr1 of WRP */
	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
					data_len : (wrp->size1 - wr_off);

	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
	data_len -= nbytes;

	/* Write the remaining data from the beginning of the circular buffer */
	if (data_len) {
		CSIO_DB_ASSERT(data_len <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
	}
}
970*a3667aaeSNaresh Kumar Inna 
971*a3667aaeSNaresh Kumar Inna /*
972*a3667aaeSNaresh Kumar Inna  * csio_wr_issue - Notify chip of Work request.
973*a3667aaeSNaresh Kumar Inna  * @hw: HW module.
974*a3667aaeSNaresh Kumar Inna  * @qidx: Index of queue.
975*a3667aaeSNaresh Kumar Inna  * @prio: 0: Low priority, 1: High priority
976*a3667aaeSNaresh Kumar Inna  *
977*a3667aaeSNaresh Kumar Inna  * Rings the SGE Doorbell by writing the current producer index of the passed
978*a3667aaeSNaresh Kumar Inna  * in queue into the register.
979*a3667aaeSNaresh Kumar Inna  *
980*a3667aaeSNaresh Kumar Inna  */
981*a3667aaeSNaresh Kumar Inna int
982*a3667aaeSNaresh Kumar Inna csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
983*a3667aaeSNaresh Kumar Inna {
984*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
985*a3667aaeSNaresh Kumar Inna 	struct csio_q *q = wrm->q_arr[qidx];
986*a3667aaeSNaresh Kumar Inna 
987*a3667aaeSNaresh Kumar Inna 	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
988*a3667aaeSNaresh Kumar Inna 
989*a3667aaeSNaresh Kumar Inna 	wmb();
990*a3667aaeSNaresh Kumar Inna 	/* Ring SGE Doorbell writing q->pidx into it */
991*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
992*a3667aaeSNaresh Kumar Inna 		      PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
993*a3667aaeSNaresh Kumar Inna 	q->inc_idx = 0;
994*a3667aaeSNaresh Kumar Inna 
995*a3667aaeSNaresh Kumar Inna 	return 0;
996*a3667aaeSNaresh Kumar Inna }
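
/*
 * Editor's sketch (not part of the driver): the usual post-then-ring
 * calling pattern. It assumes csio_wr_get() (defined just above this
 * section) reserves 'size' bytes of WR space and bumps q->inc_idx.
 * ex_post_payload() is a hypothetical name.
 */
static int
ex_post_payload(struct csio_hw *hw, int qidx, void *payload, uint32_t size)
{
	struct csio_wr_pair wrp;
	int rv;

	rv = csio_wr_get(hw, qidx, size, &wrp);	/* reserves WR slots */
	if (rv != 0)
		return rv;

	csio_wr_copy_to_wrp(payload, &wrp, 0, size);

	/* One doorbell covers every slot reserved since the last ring */
	return csio_wr_issue(hw, qidx, false);
}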
997*a3667aaeSNaresh Kumar Inna 
998*a3667aaeSNaresh Kumar Inna static inline uint32_t
999*a3667aaeSNaresh Kumar Inna csio_wr_avail_qcredits(struct csio_q *q)
1000*a3667aaeSNaresh Kumar Inna {
1001*a3667aaeSNaresh Kumar Inna 	if (q->pidx > q->cidx)
1002*a3667aaeSNaresh Kumar Inna 		return q->pidx - q->cidx;
1003*a3667aaeSNaresh Kumar Inna 	else if (q->cidx > q->pidx)
1004*a3667aaeSNaresh Kumar Inna 		return q->credits - (q->cidx - q->pidx);
1005*a3667aaeSNaresh Kumar Inna 	else
1006*a3667aaeSNaresh Kumar Inna 		return 0;	/* cidx == pidx, empty queue */
1007*a3667aaeSNaresh Kumar Inna }
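
/*
 * Editor's sketch (not part of the driver): the same ring arithmetic
 * with a worked example. ex_ring_used() is a hypothetical name; it
 * folds the three cases above into two.
 */
static inline uint32_t
ex_ring_used(uint32_t pidx, uint32_t cidx, uint32_t credits)
{
	if (pidx >= cidx)
		return pidx - cidx;		/* no wrap; 0 when empty */
	return credits - (cidx - pidx);		/* producer has wrapped */
}
/* e.g. ex_ring_used(10, 1000, 1024) == 34: slots 1000..1023 plus 0..9 */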
1008*a3667aaeSNaresh Kumar Inna 
1009*a3667aaeSNaresh Kumar Inna /*
1010*a3667aaeSNaresh Kumar Inna  * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
1011*a3667aaeSNaresh Kumar Inna  * @hw: HW module.
1012*a3667aaeSNaresh Kumar Inna  * @flq: The freelist queue.
1013*a3667aaeSNaresh Kumar Inna  *
1014*a3667aaeSNaresh Kumar Inna  * Invalidate the driver's version of a freelist buffer entry,
1015*a3667aaeSNaresh Kumar Inna  * without freeing the associated DMA memory. The entry
1016*a3667aaeSNaresh Kumar Inna  * to be invalidated is picked up from the current Free list
1017*a3667aaeSNaresh Kumar Inna  * queue cidx.
1018*a3667aaeSNaresh Kumar Inna  *
1019*a3667aaeSNaresh Kumar Inna  */
1020*a3667aaeSNaresh Kumar Inna static inline void
1021*a3667aaeSNaresh Kumar Inna csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
1022*a3667aaeSNaresh Kumar Inna {
1023*a3667aaeSNaresh Kumar Inna 	flq->cidx++;
1024*a3667aaeSNaresh Kumar Inna 	if (flq->cidx == flq->credits) {
1025*a3667aaeSNaresh Kumar Inna 		flq->cidx = 0;
1026*a3667aaeSNaresh Kumar Inna 		CSIO_INC_STATS(flq, n_qwrap);
1027*a3667aaeSNaresh Kumar Inna 	}
1028*a3667aaeSNaresh Kumar Inna }
1029*a3667aaeSNaresh Kumar Inna 
1030*a3667aaeSNaresh Kumar Inna /*
1031*a3667aaeSNaresh Kumar Inna  * csio_wr_process_fl - Process a freelist completion.
1032*a3667aaeSNaresh Kumar Inna  * @hw: HW module.
1033*a3667aaeSNaresh Kumar Inna  * @q: The ingress queue attached to the Freelist.
1034*a3667aaeSNaresh Kumar Inna  * @wr: The freelist completion WR in the ingress queue.
1035*a3667aaeSNaresh Kumar Inna  * @len_to_qid: The lower 32 bits of the first flit of the RSP footer.
1036*a3667aaeSNaresh Kumar Inna  * @iq_handler: Caller's handler for this completion.
1037*a3667aaeSNaresh Kumar Inna  * @priv: Private pointer of caller
1038*a3667aaeSNaresh Kumar Inna  *
1039*a3667aaeSNaresh Kumar Inna  */
1040*a3667aaeSNaresh Kumar Inna static inline void
1041*a3667aaeSNaresh Kumar Inna csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
1042*a3667aaeSNaresh Kumar Inna 		   void *wr, uint32_t len_to_qid,
1043*a3667aaeSNaresh Kumar Inna 		   void (*iq_handler)(struct csio_hw *, void *,
1044*a3667aaeSNaresh Kumar Inna 				      uint32_t, struct csio_fl_dma_buf *,
1045*a3667aaeSNaresh Kumar Inna 				      void *),
1046*a3667aaeSNaresh Kumar Inna 		   void *priv)
1047*a3667aaeSNaresh Kumar Inna {
1048*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1049*a3667aaeSNaresh Kumar Inna 	struct csio_sge *sge = &wrm->sge;
1050*a3667aaeSNaresh Kumar Inna 	struct csio_fl_dma_buf flb;
1051*a3667aaeSNaresh Kumar Inna 	struct csio_dma_buf *buf, *fbuf;
1052*a3667aaeSNaresh Kumar Inna 	uint32_t bufsz, len, lastlen = 0;
1053*a3667aaeSNaresh Kumar Inna 	int n;
1054*a3667aaeSNaresh Kumar Inna 	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
1055*a3667aaeSNaresh Kumar Inna 
1056*a3667aaeSNaresh Kumar Inna 	CSIO_DB_ASSERT(flq != NULL);
1057*a3667aaeSNaresh Kumar Inna 
1058*a3667aaeSNaresh Kumar Inna 	len = len_to_qid;
1059*a3667aaeSNaresh Kumar Inna 
1060*a3667aaeSNaresh Kumar Inna 	if (len & IQWRF_NEWBUF) {
1061*a3667aaeSNaresh Kumar Inna 		if (flq->un.fl.offset > 0) {
1062*a3667aaeSNaresh Kumar Inna 			csio_wr_inval_flq_buf(hw, flq);
1063*a3667aaeSNaresh Kumar Inna 			flq->un.fl.offset = 0;
1064*a3667aaeSNaresh Kumar Inna 		}
1065*a3667aaeSNaresh Kumar Inna 		len = IQWRF_LEN_GET(len);
1066*a3667aaeSNaresh Kumar Inna 	}
1067*a3667aaeSNaresh Kumar Inna 
1068*a3667aaeSNaresh Kumar Inna 	CSIO_DB_ASSERT(len != 0);
1069*a3667aaeSNaresh Kumar Inna 
1070*a3667aaeSNaresh Kumar Inna 	flb.totlen = len;
1071*a3667aaeSNaresh Kumar Inna 
1072*a3667aaeSNaresh Kumar Inna 	/* Consume all freelist buffers used for len bytes */
1073*a3667aaeSNaresh Kumar Inna 	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
1074*a3667aaeSNaresh Kumar Inna 		buf = &flq->un.fl.bufs[flq->cidx];
1075*a3667aaeSNaresh Kumar Inna 		bufsz = csio_wr_fl_bufsz(sge, buf);
1076*a3667aaeSNaresh Kumar Inna 
1077*a3667aaeSNaresh Kumar Inna 		fbuf->paddr	= buf->paddr;
1078*a3667aaeSNaresh Kumar Inna 		fbuf->vaddr	= buf->vaddr;
1079*a3667aaeSNaresh Kumar Inna 
1080*a3667aaeSNaresh Kumar Inna 		flb.offset	= flq->un.fl.offset;
1081*a3667aaeSNaresh Kumar Inna 		lastlen		= min(bufsz, len);
1082*a3667aaeSNaresh Kumar Inna 		fbuf->len	= lastlen;
1083*a3667aaeSNaresh Kumar Inna 
1084*a3667aaeSNaresh Kumar Inna 		len -= lastlen;
1085*a3667aaeSNaresh Kumar Inna 		if (!len)
1086*a3667aaeSNaresh Kumar Inna 			break;
1087*a3667aaeSNaresh Kumar Inna 		csio_wr_inval_flq_buf(hw, flq);
1088*a3667aaeSNaresh Kumar Inna 	}
1089*a3667aaeSNaresh Kumar Inna 
1090*a3667aaeSNaresh Kumar Inna 	flb.defer_free = flq->un.fl.packen ? 0 : 1;
1091*a3667aaeSNaresh Kumar Inna 
1092*a3667aaeSNaresh Kumar Inna 	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
1093*a3667aaeSNaresh Kumar Inna 		   &flb, priv);
1094*a3667aaeSNaresh Kumar Inna 
1095*a3667aaeSNaresh Kumar Inna 	if (flq->un.fl.packen)
1096*a3667aaeSNaresh Kumar Inna 		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
1097*a3667aaeSNaresh Kumar Inna 	else
1098*a3667aaeSNaresh Kumar Inna 		csio_wr_inval_flq_buf(hw, flq);
1099*a3667aaeSNaresh Kumar Inna 
1100*a3667aaeSNaresh Kumar Inna }
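
/*
 * Editor's sketch (not part of the driver): the gather loop above,
 * reduced to its length bookkeeping. ex_gather() is hypothetical and
 * assumes a single buffer size 'bufsz'; the real loop asks
 * csio_wr_fl_bufsz() for each buffer's size.
 */
static int
ex_gather(uint32_t *seg_len, int max_segs, uint32_t bufsz, uint32_t len)
{
	int n;

	for (n = 0; n < max_segs && len; n++) {
		seg_len[n] = min(bufsz, len);	/* last segment may be short */
		len -= seg_len[n];
	}
	return n;	/* freelist buffers consumed for the payload */
}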
1101*a3667aaeSNaresh Kumar Inna 
1102*a3667aaeSNaresh Kumar Inna /*
1103*a3667aaeSNaresh Kumar Inna  * csio_is_new_iqwr - Is this a new ingress queue entry?
1104*a3667aaeSNaresh Kumar Inna  * @q: Ingress queue.
1105*a3667aaeSNaresh Kumar Inna  * @ftr: Ingress queue WR SGE footer.
1106*a3667aaeSNaresh Kumar Inna  *
1107*a3667aaeSNaresh Kumar Inna  * The entry is new if our generation bit matches the corresponding
1108*a3667aaeSNaresh Kumar Inna  * bit in the footer of the current WR.
1109*a3667aaeSNaresh Kumar Inna  */
1110*a3667aaeSNaresh Kumar Inna static inline bool
1111*a3667aaeSNaresh Kumar Inna csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
1112*a3667aaeSNaresh Kumar Inna {
1113*a3667aaeSNaresh Kumar Inna 	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
1114*a3667aaeSNaresh Kumar Inna }
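
/*
 * Editor's note (worked example of the generation-bit protocol; the
 * starting value is an assumption for illustration): suppose the driver
 * starts with genbit == 1 and the firmware writes gen == 1 into each
 * footer it fills on the first pass. Entries left over from an earlier
 * pass still carry the old gen value, so they compare unequal and the
 * consume loop below stops at the first stale entry. Once the driver
 * wraps cidx back to slot 0 it toggles its genbit (see
 * csio_wr_process_iq()), so it then matches the firmware's next pass.
 */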
1115*a3667aaeSNaresh Kumar Inna 
1116*a3667aaeSNaresh Kumar Inna /*
1117*a3667aaeSNaresh Kumar Inna  * csio_wr_process_iq - Process elements in Ingress queue.
1118*a3667aaeSNaresh Kumar Inna  * @hw:  HW pointer
1119*a3667aaeSNaresh Kumar Inna  * @qidx: Index of queue
1120*a3667aaeSNaresh Kumar Inna  * @iq_handler: Handler for this queue
1121*a3667aaeSNaresh Kumar Inna  * @priv: Caller's private pointer
1122*a3667aaeSNaresh Kumar Inna  *
1123*a3667aaeSNaresh Kumar Inna  * This routine walks through every entry of the ingress queue, calling
1124*a3667aaeSNaresh Kumar Inna  * the provided iq_handler with the entry, until the generation bit
1125*a3667aaeSNaresh Kumar Inna  * flips.
1126*a3667aaeSNaresh Kumar Inna  */
1127*a3667aaeSNaresh Kumar Inna int
1128*a3667aaeSNaresh Kumar Inna csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
1129*a3667aaeSNaresh Kumar Inna 		   void (*iq_handler)(struct csio_hw *, void *,
1130*a3667aaeSNaresh Kumar Inna 				      uint32_t, struct csio_fl_dma_buf *,
1131*a3667aaeSNaresh Kumar Inna 				      void *),
1132*a3667aaeSNaresh Kumar Inna 		   void *priv)
1133*a3667aaeSNaresh Kumar Inna {
1134*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1135*a3667aaeSNaresh Kumar Inna 	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
1136*a3667aaeSNaresh Kumar Inna 	struct csio_iqwr_footer *ftr;
1137*a3667aaeSNaresh Kumar Inna 	uint32_t wr_type, fw_qid, qid;
1138*a3667aaeSNaresh Kumar Inna 	struct csio_q *q_completed;
1139*a3667aaeSNaresh Kumar Inna 	struct csio_q *flq = csio_iq_has_fl(q) ?
1140*a3667aaeSNaresh Kumar Inna 					wrm->q_arr[q->un.iq.flq_idx] : NULL;
1141*a3667aaeSNaresh Kumar Inna 	int rv = 0;
1142*a3667aaeSNaresh Kumar Inna 
1143*a3667aaeSNaresh Kumar Inna 	/* Get the footer */
1144*a3667aaeSNaresh Kumar Inna 	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
1145*a3667aaeSNaresh Kumar Inna 					  (q->wr_sz - sizeof(*ftr)));
1146*a3667aaeSNaresh Kumar Inna 
1147*a3667aaeSNaresh Kumar Inna 	/*
1148*a3667aaeSNaresh Kumar Inna 	 * When the queue wrapped around last time, the driver should have
1149*a3667aaeSNaresh Kumar Inna 	 * inverted q->un.iq.genbit as well (see the wrap handling below).
1150*a3667aaeSNaresh Kumar Inna 	 */
1151*a3667aaeSNaresh Kumar Inna 	while (csio_is_new_iqwr(q, ftr)) {
1152*a3667aaeSNaresh Kumar Inna 
1153*a3667aaeSNaresh Kumar Inna 		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
1154*a3667aaeSNaresh Kumar Inna 						(uintptr_t)q->vwrap);
1155*a3667aaeSNaresh Kumar Inna 		rmb();
1156*a3667aaeSNaresh Kumar Inna 		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
1157*a3667aaeSNaresh Kumar Inna 
1158*a3667aaeSNaresh Kumar Inna 		switch (wr_type) {
1159*a3667aaeSNaresh Kumar Inna 		case X_RSPD_TYPE_CPL:
1160*a3667aaeSNaresh Kumar Inna 			/* Subtract footer from WR len */
1161*a3667aaeSNaresh Kumar Inna 			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
1162*a3667aaeSNaresh Kumar Inna 			break;
1163*a3667aaeSNaresh Kumar Inna 		case X_RSPD_TYPE_FLBUF:
1164*a3667aaeSNaresh Kumar Inna 			csio_wr_process_fl(hw, q, wr,
1165*a3667aaeSNaresh Kumar Inna 					   ntohl(ftr->pldbuflen_qid),
1166*a3667aaeSNaresh Kumar Inna 					   iq_handler, priv);
1167*a3667aaeSNaresh Kumar Inna 			break;
1168*a3667aaeSNaresh Kumar Inna 		case X_RSPD_TYPE_INTR:
1169*a3667aaeSNaresh Kumar Inna 			fw_qid = ntohl(ftr->pldbuflen_qid);
1170*a3667aaeSNaresh Kumar Inna 			qid = fw_qid - wrm->fw_iq_start;
1171*a3667aaeSNaresh Kumar Inna 			q_completed = hw->wrm.intr_map[qid];
1172*a3667aaeSNaresh Kumar Inna 
1173*a3667aaeSNaresh Kumar Inna 			if (unlikely(qid ==
1174*a3667aaeSNaresh Kumar Inna 					csio_q_physiqid(hw, hw->intr_iq_idx))) {
1175*a3667aaeSNaresh Kumar Inna 				/*
1176*a3667aaeSNaresh Kumar Inna 				 * We are already inside the forwarded
1177*a3667aaeSNaresh Kumar Inna 				 * interrupt queue's service routine;
1178*a3667aaeSNaresh Kumar Inna 				 * do not service it again!
1179*a3667aaeSNaresh Kumar Inna 				 *
1180*a3667aaeSNaresh Kumar Inna 				 */
1181*a3667aaeSNaresh Kumar Inna 			} else {
1182*a3667aaeSNaresh Kumar Inna 				CSIO_DB_ASSERT(q_completed);
1183*a3667aaeSNaresh Kumar Inna 				CSIO_DB_ASSERT(
1184*a3667aaeSNaresh Kumar Inna 					q_completed->un.iq.iq_intx_handler);
1185*a3667aaeSNaresh Kumar Inna 
1186*a3667aaeSNaresh Kumar Inna 				/* Call the queue handler. */
1187*a3667aaeSNaresh Kumar Inna 				q_completed->un.iq.iq_intx_handler(hw, NULL,
1188*a3667aaeSNaresh Kumar Inna 						0, NULL, (void *)q_completed);
1189*a3667aaeSNaresh Kumar Inna 			}
1190*a3667aaeSNaresh Kumar Inna 			break;
1191*a3667aaeSNaresh Kumar Inna 		default:
1192*a3667aaeSNaresh Kumar Inna 			csio_warn(hw, "Unknown resp type 0x%x received\n",
1193*a3667aaeSNaresh Kumar Inna 				 wr_type);
1194*a3667aaeSNaresh Kumar Inna 			CSIO_INC_STATS(q, n_rsp_unknown);
1195*a3667aaeSNaresh Kumar Inna 			break;
1196*a3667aaeSNaresh Kumar Inna 		}
1197*a3667aaeSNaresh Kumar Inna 
1198*a3667aaeSNaresh Kumar Inna 		/*
1199*a3667aaeSNaresh Kumar Inna 		 * Ingress queues *always* have fixed-size WR entries, so
1200*a3667aaeSNaresh Kumar Inna 		 * there are always complete WRs up to the end of the
1201*a3667aaeSNaresh Kumar Inna 		 * queue.
1202*a3667aaeSNaresh Kumar Inna 		 */
1203*a3667aaeSNaresh Kumar Inna 		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
1204*a3667aaeSNaresh Kumar Inna 
1205*a3667aaeSNaresh Kumar Inna 			/* Roll over to start of queue */
1206*a3667aaeSNaresh Kumar Inna 			q->cidx = 0;
1207*a3667aaeSNaresh Kumar Inna 			wr	= q->vstart;
1208*a3667aaeSNaresh Kumar Inna 
1209*a3667aaeSNaresh Kumar Inna 			/* Toggle genbit */
1210*a3667aaeSNaresh Kumar Inna 			q->un.iq.genbit ^= 0x1;
1211*a3667aaeSNaresh Kumar Inna 
1212*a3667aaeSNaresh Kumar Inna 			CSIO_INC_STATS(q, n_qwrap);
1213*a3667aaeSNaresh Kumar Inna 		} else {
1214*a3667aaeSNaresh Kumar Inna 			q->cidx++;
1215*a3667aaeSNaresh Kumar Inna 			wr	= (void *)((uintptr_t)(q->vstart) +
1216*a3667aaeSNaresh Kumar Inna 					   (q->cidx * q->wr_sz));
1217*a3667aaeSNaresh Kumar Inna 		}
1218*a3667aaeSNaresh Kumar Inna 
1219*a3667aaeSNaresh Kumar Inna 		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
1220*a3667aaeSNaresh Kumar Inna 						  (q->wr_sz - sizeof(*ftr)));
1221*a3667aaeSNaresh Kumar Inna 		q->inc_idx++;
1222*a3667aaeSNaresh Kumar Inna 
1223*a3667aaeSNaresh Kumar Inna 	} /* while (csio_is_new_iqwr(q, ftr)) */
1224*a3667aaeSNaresh Kumar Inna 
1225*a3667aaeSNaresh Kumar Inna 	/*
1226*a3667aaeSNaresh Kumar Inna 	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
1227*a3667aaeSNaresh Kumar Inna 	 * especially in MSI-X mode. With INTx, this may be a common occurrence.
1228*a3667aaeSNaresh Kumar Inna 	 */
1229*a3667aaeSNaresh Kumar Inna 	if (unlikely(!q->inc_idx)) {
1230*a3667aaeSNaresh Kumar Inna 		CSIO_INC_STATS(q, n_stray_comp);
1231*a3667aaeSNaresh Kumar Inna 		rv = -EINVAL;
1232*a3667aaeSNaresh Kumar Inna 		goto restart;
1233*a3667aaeSNaresh Kumar Inna 	}
1234*a3667aaeSNaresh Kumar Inna 
1235*a3667aaeSNaresh Kumar Inna 	/* Replenish free list buffers if pending falls below low water mark */
1236*a3667aaeSNaresh Kumar Inna 	if (flq) {
1237*a3667aaeSNaresh Kumar Inna 		uint32_t avail  = csio_wr_avail_qcredits(flq);
1238*a3667aaeSNaresh Kumar Inna 		if (avail <= 16) {
1239*a3667aaeSNaresh Kumar Inna 			/* Make sure at least 1 credit (8 FL buffers) in
1240*a3667aaeSNaresh Kumar Inna 			 * the FLQ remains unpopulated; otherwise the HW
1241*a3667aaeSNaresh Kumar Inna 			 * thinks the FLQ is empty.
1242*a3667aaeSNaresh Kumar Inna 			 */
1243*a3667aaeSNaresh Kumar Inna 			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
1244*a3667aaeSNaresh Kumar Inna 			csio_wr_ring_fldb(hw, flq);
1245*a3667aaeSNaresh Kumar Inna 		}
1246*a3667aaeSNaresh Kumar Inna 	}
1247*a3667aaeSNaresh Kumar Inna 
1248*a3667aaeSNaresh Kumar Inna restart:
1249*a3667aaeSNaresh Kumar Inna 	/* Now inform SGE about our incremental index value */
1250*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, CIDXINC(q->inc_idx)		|
1251*a3667aaeSNaresh Kumar Inna 			  INGRESSQID(q->un.iq.physiqid)	|
1252*a3667aaeSNaresh Kumar Inna 			  TIMERREG(csio_sge_timer_reg),
1253*a3667aaeSNaresh Kumar Inna 			  MYPF_REG(SGE_PF_GTS));
1254*a3667aaeSNaresh Kumar Inna 	q->stats.n_tot_rsps += q->inc_idx;
1255*a3667aaeSNaresh Kumar Inna 
1256*a3667aaeSNaresh Kumar Inna 	q->inc_idx = 0;
1257*a3667aaeSNaresh Kumar Inna 
1258*a3667aaeSNaresh Kumar Inna 	return rv;
1259*a3667aaeSNaresh Kumar Inna }
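
/*
 * Editor's sketch (not part of the driver): the ring mechanics of the
 * consume loop above, distilled. 'struct ex_iq' and ex_iq_next() are
 * hypothetical; entries are wr_sz bytes each and [vstart, vwrap) holds
 * the whole ring.
 */
struct ex_iq {
	uint8_t		*vstart, *vwrap;
	uint32_t	cidx, wr_sz, inc_idx;
	uint8_t		genbit;
};

static void
ex_iq_next(struct ex_iq *q, uint8_t **wr)
{
	if (*wr + q->wr_sz == q->vwrap) {
		q->cidx = 0;		/* roll over to the start */
		*wr = q->vstart;
		q->genbit ^= 1;		/* expect the flipped gen now */
	} else {
		q->cidx++;
		*wr += q->wr_sz;
	}
	q->inc_idx++;	/* reported later in one batched GTS write */
}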
1260*a3667aaeSNaresh Kumar Inna 
1261*a3667aaeSNaresh Kumar Inna int
1262*a3667aaeSNaresh Kumar Inna csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
1263*a3667aaeSNaresh Kumar Inna 		   void (*iq_handler)(struct csio_hw *, void *,
1264*a3667aaeSNaresh Kumar Inna 				      uint32_t, struct csio_fl_dma_buf *,
1265*a3667aaeSNaresh Kumar Inna 				      void *),
1266*a3667aaeSNaresh Kumar Inna 		   void *priv)
1267*a3667aaeSNaresh Kumar Inna {
1268*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
1269*a3667aaeSNaresh Kumar Inna 	struct csio_q	*iq	= wrm->q_arr[qidx];
1270*a3667aaeSNaresh Kumar Inna 
1271*a3667aaeSNaresh Kumar Inna 	return csio_wr_process_iq(hw, iq, iq_handler, priv);
1272*a3667aaeSNaresh Kumar Inna }
1273*a3667aaeSNaresh Kumar Inna 
1274*a3667aaeSNaresh Kumar Inna static int
1275*a3667aaeSNaresh Kumar Inna csio_closest_timer(struct csio_sge *s, int time)
1276*a3667aaeSNaresh Kumar Inna {
1277*a3667aaeSNaresh Kumar Inna 	int i, delta, match = 0, min_delta = INT_MAX;
1278*a3667aaeSNaresh Kumar Inna 
1279*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1280*a3667aaeSNaresh Kumar Inna 		delta = time - s->timer_val[i];
1281*a3667aaeSNaresh Kumar Inna 		if (delta < 0)
1282*a3667aaeSNaresh Kumar Inna 			delta = -delta;
1283*a3667aaeSNaresh Kumar Inna 		if (delta < min_delta) {
1284*a3667aaeSNaresh Kumar Inna 			min_delta = delta;
1285*a3667aaeSNaresh Kumar Inna 			match = i;
1286*a3667aaeSNaresh Kumar Inna 		}
1287*a3667aaeSNaresh Kumar Inna 	}
1288*a3667aaeSNaresh Kumar Inna 	return match;
1289*a3667aaeSNaresh Kumar Inna }
1290*a3667aaeSNaresh Kumar Inna 
1291*a3667aaeSNaresh Kumar Inna static int
1292*a3667aaeSNaresh Kumar Inna csio_closest_thresh(struct csio_sge *s, int cnt)
1293*a3667aaeSNaresh Kumar Inna {
1294*a3667aaeSNaresh Kumar Inna 	int i, delta, match = 0, min_delta = INT_MAX;
1295*a3667aaeSNaresh Kumar Inna 
1296*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1297*a3667aaeSNaresh Kumar Inna 		delta = cnt - s->counter_val[i];
1298*a3667aaeSNaresh Kumar Inna 		if (delta < 0)
1299*a3667aaeSNaresh Kumar Inna 			delta = -delta;
1300*a3667aaeSNaresh Kumar Inna 		if (delta < min_delta) {
1301*a3667aaeSNaresh Kumar Inna 			min_delta = delta;
1302*a3667aaeSNaresh Kumar Inna 			match = i;
1303*a3667aaeSNaresh Kumar Inna 		}
1304*a3667aaeSNaresh Kumar Inna 	}
1305*a3667aaeSNaresh Kumar Inna 	return match;
1306*a3667aaeSNaresh Kumar Inna }
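
/*
 * Editor's sketch (not part of the driver): csio_closest_timer() and
 * csio_closest_thresh() are the same nearest-value search over two
 * different arrays. A generic version (ex_closest_idx() is a
 * hypothetical name):
 */
static int
ex_closest_idx(const uint16_t *vals, int nvals, int target)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < nvals; i++) {
		delta = target - vals[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {	/* strict '<' keeps first tie */
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/* e.g. vals = {1, 5, 10, 50, 100, 200}, target = 8 -> index 2 (value 10) */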
1307*a3667aaeSNaresh Kumar Inna 
1308*a3667aaeSNaresh Kumar Inna static void
1309*a3667aaeSNaresh Kumar Inna csio_wr_fixup_host_params(struct csio_hw *hw)
1310*a3667aaeSNaresh Kumar Inna {
1311*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1312*a3667aaeSNaresh Kumar Inna 	struct csio_sge *sge = &wrm->sge;
1313*a3667aaeSNaresh Kumar Inna 	uint32_t clsz = L1_CACHE_BYTES;
1314*a3667aaeSNaresh Kumar Inna 	uint32_t s_hps = PAGE_SHIFT - 10;
1315*a3667aaeSNaresh Kumar Inna 	uint32_t ingpad = 0;
1316*a3667aaeSNaresh Kumar Inna 	uint32_t stat_len = clsz > 64 ? 128 : 64;
1317*a3667aaeSNaresh Kumar Inna 
1318*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
1319*a3667aaeSNaresh Kumar Inna 		      HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
1320*a3667aaeSNaresh Kumar Inna 		      HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
1321*a3667aaeSNaresh Kumar Inna 		      HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
1322*a3667aaeSNaresh Kumar Inna 		      SGE_HOST_PAGE_SIZE);
1323*a3667aaeSNaresh Kumar Inna 
1324*a3667aaeSNaresh Kumar Inna 	sge->csio_fl_align = clsz < 32 ? 32 : clsz;
1325*a3667aaeSNaresh Kumar Inna 	ingpad = ilog2(sge->csio_fl_align) - 5;
1326*a3667aaeSNaresh Kumar Inna 
1327*a3667aaeSNaresh Kumar Inna 	csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
1328*a3667aaeSNaresh Kumar Inna 					    EGRSTATUSPAGESIZE(1),
1329*a3667aaeSNaresh Kumar Inna 			   INGPADBOUNDARY(ingpad) |
1330*a3667aaeSNaresh Kumar Inna 			   EGRSTATUSPAGESIZE(stat_len != 64));
1331*a3667aaeSNaresh Kumar Inna 
1332*a3667aaeSNaresh Kumar Inna 	/* FL BUFFER SIZE#0 is the page size, i.e. already cache-line aligned */
1333*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
1334*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw,
1335*a3667aaeSNaresh Kumar Inna 		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
1336*a3667aaeSNaresh Kumar Inna 		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1337*a3667aaeSNaresh Kumar Inna 		      SGE_FL_BUFFER_SIZE2);
1338*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw,
1339*a3667aaeSNaresh Kumar Inna 		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
1340*a3667aaeSNaresh Kumar Inna 		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1341*a3667aaeSNaresh Kumar Inna 		      SGE_FL_BUFFER_SIZE3);
1342*a3667aaeSNaresh Kumar Inna 
1343*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
1344*a3667aaeSNaresh Kumar Inna 
1345*a3667aaeSNaresh Kumar Inna 	/* default value of rx_dma_offset of the NIC driver */
1346*a3667aaeSNaresh Kumar Inna 	csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
1347*a3667aaeSNaresh Kumar Inna 			   PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
1348*a3667aaeSNaresh Kumar Inna }
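
/*
 * Editor's sketch (not part of the driver): the SGE_FL_BUFFER_SIZE2/3
 * writes above round a size up to the freelist alignment using the
 * classic power-of-two trick. ex_align_up() is a hypothetical name and
 * requires 'align' to be a power of two.
 */
static inline uint32_t
ex_align_up(uint32_t x, uint32_t align)
{
	return (x + align - 1) & ~(align - 1);
}
/* e.g. ex_align_up(1500, 64) == 1536, ex_align_up(1536, 64) == 1536 */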
1349*a3667aaeSNaresh Kumar Inna 
1350*a3667aaeSNaresh Kumar Inna static void
1351*a3667aaeSNaresh Kumar Inna csio_init_intr_coalesce_parms(struct csio_hw *hw)
1352*a3667aaeSNaresh Kumar Inna {
1353*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1354*a3667aaeSNaresh Kumar Inna 	struct csio_sge *sge = &wrm->sge;
1355*a3667aaeSNaresh Kumar Inna 
1356*a3667aaeSNaresh Kumar Inna 	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
1357*a3667aaeSNaresh Kumar Inna 	if (csio_intr_coalesce_cnt) {
1358*a3667aaeSNaresh Kumar Inna 		csio_sge_thresh_reg = 0;
1359*a3667aaeSNaresh Kumar Inna 		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
1360*a3667aaeSNaresh Kumar Inna 		return;
1361*a3667aaeSNaresh Kumar Inna 	}
1362*a3667aaeSNaresh Kumar Inna 
1363*a3667aaeSNaresh Kumar Inna 	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
1364*a3667aaeSNaresh Kumar Inna }
1365*a3667aaeSNaresh Kumar Inna 
1366*a3667aaeSNaresh Kumar Inna /*
1367*a3667aaeSNaresh Kumar Inna  * csio_wr_get_sge - Get SGE register values.
1368*a3667aaeSNaresh Kumar Inna  * @hw: HW module.
1369*a3667aaeSNaresh Kumar Inna  *
1370*a3667aaeSNaresh Kumar Inna  * Used by non-master functions and by master-functions relying on config file.
1371*a3667aaeSNaresh Kumar Inna  */
1372*a3667aaeSNaresh Kumar Inna static void
1373*a3667aaeSNaresh Kumar Inna csio_wr_get_sge(struct csio_hw *hw)
1374*a3667aaeSNaresh Kumar Inna {
1375*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1376*a3667aaeSNaresh Kumar Inna 	struct csio_sge *sge = &wrm->sge;
1377*a3667aaeSNaresh Kumar Inna 	uint32_t ingpad;
1378*a3667aaeSNaresh Kumar Inna 	int i;
1379*a3667aaeSNaresh Kumar Inna 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
1380*a3667aaeSNaresh Kumar Inna 	u32 ingress_rx_threshold;
1381*a3667aaeSNaresh Kumar Inna 
1382*a3667aaeSNaresh Kumar Inna 	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
1383*a3667aaeSNaresh Kumar Inna 
1384*a3667aaeSNaresh Kumar Inna 	ingpad = INGPADBOUNDARY_GET(sge->sge_control);
1385*a3667aaeSNaresh Kumar Inna 
1386*a3667aaeSNaresh Kumar Inna 	switch (ingpad) {
1387*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_32B:
1388*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 32; break;
1389*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_64B:
1390*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 64; break;
1391*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_128B:
1392*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 128; break;
1393*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_256B:
1394*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 256; break;
1395*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_512B:
1396*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 512; break;
1397*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_1024B:
1398*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 1024; break;
1399*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_2048B:
1400*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 2048; break;
1401*a3667aaeSNaresh Kumar Inna 	case X_INGPCIEBOUNDARY_4096B:
1402*a3667aaeSNaresh Kumar Inna 		sge->csio_fl_align = 4096; break;
1403*a3667aaeSNaresh Kumar Inna 	}
1404*a3667aaeSNaresh Kumar Inna 
1405*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
1406*a3667aaeSNaresh Kumar Inna 		csio_get_flbuf_size(hw, sge, i);
1407*a3667aaeSNaresh Kumar Inna 
1408*a3667aaeSNaresh Kumar Inna 	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
1409*a3667aaeSNaresh Kumar Inna 	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
1410*a3667aaeSNaresh Kumar Inna 	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
1411*a3667aaeSNaresh Kumar Inna 
1412*a3667aaeSNaresh Kumar Inna 	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
1413*a3667aaeSNaresh Kumar Inna 					TIMERVALUE0_GET(timer_value_0_and_1));
1414*a3667aaeSNaresh Kumar Inna 	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
1415*a3667aaeSNaresh Kumar Inna 					TIMERVALUE1_GET(timer_value_0_and_1));
1416*a3667aaeSNaresh Kumar Inna 	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
1417*a3667aaeSNaresh Kumar Inna 					TIMERVALUE2_GET(timer_value_2_and_3));
1418*a3667aaeSNaresh Kumar Inna 	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
1419*a3667aaeSNaresh Kumar Inna 					TIMERVALUE3_GET(timer_value_2_and_3));
1420*a3667aaeSNaresh Kumar Inna 	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
1421*a3667aaeSNaresh Kumar Inna 					TIMERVALUE4_GET(timer_value_4_and_5));
1422*a3667aaeSNaresh Kumar Inna 	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
1423*a3667aaeSNaresh Kumar Inna 					TIMERVALUE5_GET(timer_value_4_and_5));
1424*a3667aaeSNaresh Kumar Inna 
1425*a3667aaeSNaresh Kumar Inna 	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
1426*a3667aaeSNaresh Kumar Inna 	sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
1427*a3667aaeSNaresh Kumar Inna 	sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
1428*a3667aaeSNaresh Kumar Inna 	sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
1429*a3667aaeSNaresh Kumar Inna 	sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
1430*a3667aaeSNaresh Kumar Inna 
1431*a3667aaeSNaresh Kumar Inna 	csio_init_intr_coalesce_parms(hw);
1432*a3667aaeSNaresh Kumar Inna }
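
/*
 * Editor's sketch (not part of the driver): the *_GET macros used above
 * all unpack a bit-field with the usual shift-and-mask idiom. Generic
 * form (ex_get_field() is a hypothetical helper; the real macros
 * hard-code each field's shift and mask):
 */
static inline uint32_t
ex_get_field(uint32_t reg, uint32_t shift, uint32_t mask)
{
	return (reg >> shift) & mask;
}
/* e.g. a 16-bit value in the upper half: ex_get_field(reg, 16, 0xffff) */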
1433*a3667aaeSNaresh Kumar Inna 
1434*a3667aaeSNaresh Kumar Inna /*
1435*a3667aaeSNaresh Kumar Inna  * csio_wr_set_sge - Initialize SGE registers
1436*a3667aaeSNaresh Kumar Inna  * @hw: HW module.
1437*a3667aaeSNaresh Kumar Inna  *
1438*a3667aaeSNaresh Kumar Inna  * Used by Master function to initialize SGE registers in the absence
1439*a3667aaeSNaresh Kumar Inna  * of a config file.
1440*a3667aaeSNaresh Kumar Inna  */
1441*a3667aaeSNaresh Kumar Inna static void
1442*a3667aaeSNaresh Kumar Inna csio_wr_set_sge(struct csio_hw *hw)
1443*a3667aaeSNaresh Kumar Inna {
1444*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1445*a3667aaeSNaresh Kumar Inna 	struct csio_sge *sge = &wrm->sge;
1446*a3667aaeSNaresh Kumar Inna 	int i;
1447*a3667aaeSNaresh Kumar Inna 
1448*a3667aaeSNaresh Kumar Inna 	/*
1449*a3667aaeSNaresh Kumar Inna 	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
1450*a3667aaeSNaresh Kumar Inna 	 * Queue and Packet Data to the Free List.
1451*a3667aaeSNaresh Kumar Inna 	 */
1452*a3667aaeSNaresh Kumar Inna 	csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
1453*a3667aaeSNaresh Kumar Inna 
1454*a3667aaeSNaresh Kumar Inna 	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
1455*a3667aaeSNaresh Kumar Inna 
1456*a3667aaeSNaresh Kumar Inna 	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
1457*a3667aaeSNaresh Kumar Inna 
1458*a3667aaeSNaresh Kumar Inna 	/*
1459*a3667aaeSNaresh Kumar Inna 	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
1460*a3667aaeSNaresh Kumar Inna 	 * and generate an interrupt when this occurs so we can recover.
1461*a3667aaeSNaresh Kumar Inna 	 */
1462*a3667aaeSNaresh Kumar Inna 	csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
1463*a3667aaeSNaresh Kumar Inna 			   HP_INT_THRESH(HP_INT_THRESH_MASK) |
1464*a3667aaeSNaresh Kumar Inna 			   LP_INT_THRESH(LP_INT_THRESH_MASK),
1465*a3667aaeSNaresh Kumar Inna 			   HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
1466*a3667aaeSNaresh Kumar Inna 			   LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
1467*a3667aaeSNaresh Kumar Inna 	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
1468*a3667aaeSNaresh Kumar Inna 			   ENABLE_DROP);
1469*a3667aaeSNaresh Kumar Inna 
1470*a3667aaeSNaresh Kumar Inna 	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
1471*a3667aaeSNaresh Kumar Inna 
1472*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
1473*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
1474*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
1475*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
1476*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
1477*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
1478*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
1479*a3667aaeSNaresh Kumar Inna 	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
1480*a3667aaeSNaresh Kumar Inna 
1481*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
1482*a3667aaeSNaresh Kumar Inna 		csio_get_flbuf_size(hw, sge, i);
1483*a3667aaeSNaresh Kumar Inna 
1484*a3667aaeSNaresh Kumar Inna 	/* Initialize interrupt coalescing attributes */
1485*a3667aaeSNaresh Kumar Inna 	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
1486*a3667aaeSNaresh Kumar Inna 	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
1487*a3667aaeSNaresh Kumar Inna 	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
1488*a3667aaeSNaresh Kumar Inna 	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
1489*a3667aaeSNaresh Kumar Inna 	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
1490*a3667aaeSNaresh Kumar Inna 	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
1491*a3667aaeSNaresh Kumar Inna 
1492*a3667aaeSNaresh Kumar Inna 	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
1493*a3667aaeSNaresh Kumar Inna 	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
1494*a3667aaeSNaresh Kumar Inna 	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
1495*a3667aaeSNaresh Kumar Inna 	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
1496*a3667aaeSNaresh Kumar Inna 
1497*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
1498*a3667aaeSNaresh Kumar Inna 		      THRESHOLD_1(sge->counter_val[1]) |
1499*a3667aaeSNaresh Kumar Inna 		      THRESHOLD_2(sge->counter_val[2]) |
1500*a3667aaeSNaresh Kumar Inna 		      THRESHOLD_3(sge->counter_val[3]),
1501*a3667aaeSNaresh Kumar Inna 		      SGE_INGRESS_RX_THRESHOLD);
1502*a3667aaeSNaresh Kumar Inna 
1503*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw,
1504*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
1505*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
1506*a3667aaeSNaresh Kumar Inna 		   SGE_TIMER_VALUE_0_AND_1);
1507*a3667aaeSNaresh Kumar Inna 
1508*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw,
1509*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
1510*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
1511*a3667aaeSNaresh Kumar Inna 		   SGE_TIMER_VALUE_2_AND_3);
1512*a3667aaeSNaresh Kumar Inna 
1513*a3667aaeSNaresh Kumar Inna 	csio_wr_reg32(hw,
1514*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
1515*a3667aaeSNaresh Kumar Inna 		   TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
1516*a3667aaeSNaresh Kumar Inna 		   SGE_TIMER_VALUE_4_AND_5);
1517*a3667aaeSNaresh Kumar Inna 
1518*a3667aaeSNaresh Kumar Inna 	csio_init_intr_coalesce_parms(hw);
1519*a3667aaeSNaresh Kumar Inna }
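
/*
 * Editor's sketch (not part of the driver): csio_set_reg_field(), used
 * heavily above, is a read-modify-write that touches only the bits in
 * 'mask'. The core operation, shown on a plain value (ex_set_field()
 * is a hypothetical name):
 */
static inline uint32_t
ex_set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}
/* e.g. ex_set_field(reg, 0xff00, 0x1200) rewrites only bits 8..15 */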
1520*a3667aaeSNaresh Kumar Inna 
1521*a3667aaeSNaresh Kumar Inna void
1522*a3667aaeSNaresh Kumar Inna csio_wr_sge_init(struct csio_hw *hw)
1523*a3667aaeSNaresh Kumar Inna {
1524*a3667aaeSNaresh Kumar Inna 	/*
1525*a3667aaeSNaresh Kumar Inna 	 * If we are master:
1526*a3667aaeSNaresh Kumar Inna 	 *    - If we plan to use the config file, we need to fixup some
1527*a3667aaeSNaresh Kumar Inna 	 *      host specific registers, and read the rest of the SGE
1528*a3667aaeSNaresh Kumar Inna 	 *      configuration.
1529*a3667aaeSNaresh Kumar Inna 	 *    - If we dont plan to use the config file, we need to initialize
1530*a3667aaeSNaresh Kumar Inna 	 *    - If we don't plan to use the config file, we need to initialize
1531*a3667aaeSNaresh Kumar Inna 	 * If we arent the master, we are only allowed to read and work off of
1532*a3667aaeSNaresh Kumar Inna 	 * If we aren't the master, we are only allowed to read and work off of
1533*a3667aaeSNaresh Kumar Inna 	 *
1534*a3667aaeSNaresh Kumar Inna 	 * Therefore, before calling this function, we assume that mastership
1535*a3667aaeSNaresh Kumar Inna 	 * of the card, and whether to use the config file or not, have
1536*a3667aaeSNaresh Kumar Inna 	 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
1537*a3667aaeSNaresh Kumar Inna 	 * CSIO_HWF_MASTER should be set/unset.
1538*a3667aaeSNaresh Kumar Inna 	 */
1539*a3667aaeSNaresh Kumar Inna 	if (csio_is_hw_master(hw)) {
1540*a3667aaeSNaresh Kumar Inna 		csio_wr_fixup_host_params(hw);
1541*a3667aaeSNaresh Kumar Inna 
1542*a3667aaeSNaresh Kumar Inna 		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
1543*a3667aaeSNaresh Kumar Inna 			csio_wr_get_sge(hw);
1544*a3667aaeSNaresh Kumar Inna 		else
1545*a3667aaeSNaresh Kumar Inna 			csio_wr_set_sge(hw);
1546*a3667aaeSNaresh Kumar Inna 	} else
1547*a3667aaeSNaresh Kumar Inna 		csio_wr_get_sge(hw);
1548*a3667aaeSNaresh Kumar Inna }
1549*a3667aaeSNaresh Kumar Inna 
1550*a3667aaeSNaresh Kumar Inna /*
1551*a3667aaeSNaresh Kumar Inna  * csio_wrm_init - Initialize Work request module.
1552*a3667aaeSNaresh Kumar Inna  * @wrm: WR module
1553*a3667aaeSNaresh Kumar Inna  * @hw: HW pointer
1554*a3667aaeSNaresh Kumar Inna  *
1555*a3667aaeSNaresh Kumar Inna  * Allocates memory for an array of queue pointers starting at q_arr.
1556*a3667aaeSNaresh Kumar Inna  */
1557*a3667aaeSNaresh Kumar Inna int
1558*a3667aaeSNaresh Kumar Inna csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
1559*a3667aaeSNaresh Kumar Inna {
1560*a3667aaeSNaresh Kumar Inna 	int i;
1561*a3667aaeSNaresh Kumar Inna 
1562*a3667aaeSNaresh Kumar Inna 	if (!wrm->num_q) {
1563*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Num queues is not set\n");
1564*a3667aaeSNaresh Kumar Inna 		return -EINVAL;
1565*a3667aaeSNaresh Kumar Inna 	}
1566*a3667aaeSNaresh Kumar Inna 
1567*a3667aaeSNaresh Kumar Inna 	wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
1568*a3667aaeSNaresh Kumar Inna 	if (!wrm->q_arr)
1569*a3667aaeSNaresh Kumar Inna 		goto err;
1570*a3667aaeSNaresh Kumar Inna 
1571*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < wrm->num_q; i++) {
1572*a3667aaeSNaresh Kumar Inna 		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
1573*a3667aaeSNaresh Kumar Inna 		if (!wrm->q_arr[i]) {
1574*a3667aaeSNaresh Kumar Inna 			while (--i >= 0)
1575*a3667aaeSNaresh Kumar Inna 				kfree(wrm->q_arr[i]);
1576*a3667aaeSNaresh Kumar Inna 			goto err_free_arr;
1577*a3667aaeSNaresh Kumar Inna 		}
1578*a3667aaeSNaresh Kumar Inna 	}
1579*a3667aaeSNaresh Kumar Inna 	wrm->free_qidx	= 0;
1580*a3667aaeSNaresh Kumar Inna 
1581*a3667aaeSNaresh Kumar Inna 	return 0;
1582*a3667aaeSNaresh Kumar Inna 
1583*a3667aaeSNaresh Kumar Inna err_free_arr:
1584*a3667aaeSNaresh Kumar Inna 	kfree(wrm->q_arr);
1585*a3667aaeSNaresh Kumar Inna err:
1586*a3667aaeSNaresh Kumar Inna 	return -ENOMEM;
1587*a3667aaeSNaresh Kumar Inna }
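
/*
 * Editor's sketch (not part of the driver): the error handling above is
 * the standard allocate-then-unwind pattern; on the first failure, free
 * elements [0, i) in reverse and then the array itself.
 * ex_alloc_ptr_array() is a hypothetical name; kcalloc() is the modern
 * spelling of the zeroed pointer-array allocation.
 */
static void **
ex_alloc_ptr_array(int n, size_t elem_sz)
{
	void **arr = kcalloc(n, sizeof(void *), GFP_KERNEL);
	int i;

	if (!arr)
		return NULL;

	for (i = 0; i < n; i++) {
		arr[i] = kzalloc(elem_sz, GFP_KERNEL);
		if (!arr[i]) {
			while (--i >= 0)	/* unwind earlier successes */
				kfree(arr[i]);
			kfree(arr);
			return NULL;
		}
	}
	return arr;
}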
1588*a3667aaeSNaresh Kumar Inna 
1589*a3667aaeSNaresh Kumar Inna /*
1590*a3667aaeSNaresh Kumar Inna  * csio_wrm_exit - Uninitialize Work request module.
1591*a3667aaeSNaresh Kumar Inna  * @wrm: WR module
1592*a3667aaeSNaresh Kumar Inna  * @hw: HW module
1593*a3667aaeSNaresh Kumar Inna  *
1594*a3667aaeSNaresh Kumar Inna  * Uninitialize WR module. Free q_arr and pointers in it.
1595*a3667aaeSNaresh Kumar Inna  * We have the additional job of freeing the DMA memory associated
1596*a3667aaeSNaresh Kumar Inna  * with the queues.
1597*a3667aaeSNaresh Kumar Inna  */
1598*a3667aaeSNaresh Kumar Inna void
1599*a3667aaeSNaresh Kumar Inna csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
1600*a3667aaeSNaresh Kumar Inna {
1601*a3667aaeSNaresh Kumar Inna 	int i;
1602*a3667aaeSNaresh Kumar Inna 	uint32_t j;
1603*a3667aaeSNaresh Kumar Inna 	struct csio_q *q;
1604*a3667aaeSNaresh Kumar Inna 	struct csio_dma_buf *buf;
1605*a3667aaeSNaresh Kumar Inna 
1606*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < wrm->num_q; i++) {
1607*a3667aaeSNaresh Kumar Inna 		q = wrm->q_arr[i];
1608*a3667aaeSNaresh Kumar Inna 
1609*a3667aaeSNaresh Kumar Inna 		if (wrm->free_qidx && (i < wrm->free_qidx)) {
1610*a3667aaeSNaresh Kumar Inna 			if (q->type == CSIO_FREELIST) {
1611*a3667aaeSNaresh Kumar Inna 				if (!q->un.fl.bufs)
1612*a3667aaeSNaresh Kumar Inna 					continue;
1613*a3667aaeSNaresh Kumar Inna 				for (j = 0; j < q->credits; j++) {
1614*a3667aaeSNaresh Kumar Inna 					buf = &q->un.fl.bufs[j];
1615*a3667aaeSNaresh Kumar Inna 					if (!buf->vaddr)
1616*a3667aaeSNaresh Kumar Inna 						continue;
1617*a3667aaeSNaresh Kumar Inna 					pci_free_consistent(hw->pdev, buf->len,
1618*a3667aaeSNaresh Kumar Inna 							    buf->vaddr,
1619*a3667aaeSNaresh Kumar Inna 							    buf->paddr);
1620*a3667aaeSNaresh Kumar Inna 				}
1621*a3667aaeSNaresh Kumar Inna 				kfree(q->un.fl.bufs);
1622*a3667aaeSNaresh Kumar Inna 			}
1623*a3667aaeSNaresh Kumar Inna 			pci_free_consistent(hw->pdev, q->size,
1624*a3667aaeSNaresh Kumar Inna 					    q->vstart, q->pstart);
1625*a3667aaeSNaresh Kumar Inna 		}
1626*a3667aaeSNaresh Kumar Inna 		kfree(q);
1627*a3667aaeSNaresh Kumar Inna 	}
1628*a3667aaeSNaresh Kumar Inna 
1629*a3667aaeSNaresh Kumar Inna 	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
1630*a3667aaeSNaresh Kumar Inna 
1631*a3667aaeSNaresh Kumar Inna 	kfree(wrm->q_arr);
1632*a3667aaeSNaresh Kumar Inna }