/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 251;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
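
/*
 * Callers test these per-state flags before acting on a QP; for
 * example, qib_post_one_send() below gates posting with:
 *
 *	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
 *		goto bail_inval;
 */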

struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						  *ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
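
/*
 * The completion-generating paths index this table by the opcode of
 * the posted work request, along the lines of (a sketch; the actual
 * consumers live in the RC/UC completion code, not in this file):
 *
 *	wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
 */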

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean; if true, drop the MR reference as each SGE is consumed
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: boolean; if true, drop the MR reference as each SGE is consumed
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
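 *
 * For example (assuming each SGE is a single contiguous, dword-aligned
 * segment), a payload split across two SGEs counts as ndesc = 3:
 * one descriptor for the header plus one per SGE.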
 */
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sg_list = ss->sg_list;
	struct qib_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;  /* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= QIB_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC QPs, or
	 * undefined opcodes on any QP type.
	 * Make sure the buffer is large enough to hold the result of
	 * an atomic operation.
	 */
	if (wr->opcode == IB_WR_FAST_REG_MR) {
		if (qib_fast_reg_mr(qp, wr))
			goto bail_inval;
	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

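	/*
	 * s_head/s_last form a ring with one slot always left open:
	 * if advancing s_head would land on s_last, the send queue is
	 * full and the post fails with -ENOMEM.
	 */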
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu)
		goto bail_inval_free;
	else
		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct qib_sge *sge = &wqe->sg_list[--j];

		atomic_dec(&sge->mr->refcount);
	}
bail_inval:
	ret = -EINVAL;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	qib_do_send(&qp->s_work);

bail:
	return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct qib_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < QIB_MULTICAST_LID_BASE) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		ibp->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = qib_lookup_qpn(ibp, qp_num);
		if (!qp)
			goto drop;
		ibp->n_unicast_rcv++;
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
		/*
		 * Notify qib_destroy_qp() if it is waiting
		 * for us to finish.
		 */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	return;

drop:
	ibp->n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

static void update_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= QIB_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

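/*
 * Byte-order helpers for copy_io() below.  PIO buffers must be written
 * as whole, naturally aligned 32-bit words, so partial source bytes are
 * staged into a dword first.  "Upper" means the higher memory addresses
 * within a dword: the high-order bits on a little-endian host and the
 * low-order bits on a big-endian one.  For instance,
 * clear_upper_bytes(v, n, off) keeps the first n bytes of v, places
 * them at byte offset off within the staged dword, and clears the rest.
 */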
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 0..3 bytes of source data left
			 * over from the dword loop above to stage.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

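/*
 * Allocate a verbs tx request from the device's free list.  If none is
 * available, queue the QP on dev->txwait (when it is still active and
 * not already on a wait list), clear QIB_S_BUSY, and return NULL with
 * *retp set to -EBUSY.
 */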
static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct qib_qp *qp, int *retp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		*retp = 0;
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		tx = NULL;
		qp->s_flags &= ~QIB_S_BUSY;
		*retp = -EBUSY;
	}

	spin_unlock(&dev->pending_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return tx;
}

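/*
 * Return a tx request to the free list, dropping the QP and MR
 * references it holds and unmapping any aligned-copy DMA buffer, then
 * wake the first QP waiting on dev->txwait for a free struct.
 */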
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		atomic_dec(&tx->mr->refcount);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct qib_qp *qp, *nqp;
	struct qib_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct qib_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

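/*
 * If the QP is still active, put it on the device's memory wait list
 * (arming mem_timer if the list was empty), clear QIB_S_BUSY, and
 * return -EBUSY so the caller retries later; otherwise return 0.
 */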
1071f931551bSRalph Campbell static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1072f931551bSRalph Campbell {
1073f931551bSRalph Campbell 	unsigned long flags;
1074f931551bSRalph Campbell 	int ret = 0;
1075f931551bSRalph Campbell 
1076f931551bSRalph Campbell 	spin_lock_irqsave(&qp->s_lock, flags);
1077f931551bSRalph Campbell 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1078f931551bSRalph Campbell 		spin_lock(&dev->pending_lock);
1079f931551bSRalph Campbell 		if (list_empty(&qp->iowait)) {
1080f931551bSRalph Campbell 			if (list_empty(&dev->memwait))
1081f931551bSRalph Campbell 				mod_timer(&dev->mem_timer, jiffies + 1);
1082f931551bSRalph Campbell 			qp->s_flags |= QIB_S_WAIT_KMEM;
1083f931551bSRalph Campbell 			list_add_tail(&qp->iowait, &dev->memwait);
1084f931551bSRalph Campbell 		}
1085f931551bSRalph Campbell 		spin_unlock(&dev->pending_lock);
1086f931551bSRalph Campbell 		qp->s_flags &= ~QIB_S_BUSY;
1087f931551bSRalph Campbell 		ret = -EBUSY;
1088f931551bSRalph Campbell 	}
1089f931551bSRalph Campbell 	spin_unlock_irqrestore(&qp->s_lock, flags);
1090f931551bSRalph Campbell 
1091f931551bSRalph Campbell 	return ret;
1092f931551bSRalph Campbell }
1093f931551bSRalph Campbell 
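/*
 * Send a packet using the SDMA engine.  Three cases are handled here:
 * resending a previously constructed request stashed in qp->s_tx,
 * pointing the SDMA descriptors directly at the SGE payload when it
 * fits in the descriptor queue, and falling back to copying the header
 * and payload into a single DMA-mapped bounce buffer.
 */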
1094f931551bSRalph Campbell static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1095f931551bSRalph Campbell 			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
1096f931551bSRalph Campbell 			      u32 plen, u32 dwords)
1097f931551bSRalph Campbell {
1098f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1099f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_dev(dev);
1100f931551bSRalph Campbell 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1101f931551bSRalph Campbell 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1102f931551bSRalph Campbell 	struct qib_verbs_txreq *tx;
1103f931551bSRalph Campbell 	struct qib_pio_header *phdr;
1104f931551bSRalph Campbell 	u32 control;
1105f931551bSRalph Campbell 	u32 ndesc;
1106f931551bSRalph Campbell 	int ret;
1107f931551bSRalph Campbell 
1108f931551bSRalph Campbell 	tx = qp->s_tx;
1109f931551bSRalph Campbell 	if (tx) {
1110f931551bSRalph Campbell 		qp->s_tx = NULL;
1111f931551bSRalph Campbell 		/* resend previously constructed packet */
1112f931551bSRalph Campbell 		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1113f931551bSRalph Campbell 		goto bail;
1114f931551bSRalph Campbell 	}
1115f931551bSRalph Campbell 
1116f931551bSRalph Campbell 	tx = get_txreq(dev, qp, &ret);
1117f931551bSRalph Campbell 	if (!tx)
1118f931551bSRalph Campbell 		goto bail;
1119f931551bSRalph Campbell 
1120f931551bSRalph Campbell 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1121f931551bSRalph Campbell 				       be16_to_cpu(hdr->lrh[0]) >> 12);
1122f931551bSRalph Campbell 	tx->qp = qp;
1123f931551bSRalph Campbell 	atomic_inc(&qp->refcount);
1124f931551bSRalph Campbell 	tx->wqe = qp->s_wqe;
1125f931551bSRalph Campbell 	tx->mr = qp->s_rdma_mr;
1126f931551bSRalph Campbell 	if (qp->s_rdma_mr)
1127f931551bSRalph Campbell 		qp->s_rdma_mr = NULL;
1128f931551bSRalph Campbell 	tx->txreq.callback = sdma_complete;
1129f931551bSRalph Campbell 	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1130f931551bSRalph Campbell 		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1131f931551bSRalph Campbell 	else
1132f931551bSRalph Campbell 		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1133f931551bSRalph Campbell 	if (plen + 1 > dd->piosize2kmax_dwords)
1134f931551bSRalph Campbell 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1135f931551bSRalph Campbell 
1136f931551bSRalph Campbell 	if (len) {
1137f931551bSRalph Campbell 		/*
1138f931551bSRalph Campbell 		 * Don't try to DMA if it takes more descriptors than
1139f931551bSRalph Campbell 		 * the queue holds.
1140f931551bSRalph Campbell 		 */
1141f931551bSRalph Campbell 		ndesc = qib_count_sge(ss, len);
1142f931551bSRalph Campbell 		if (ndesc >= ppd->sdma_descq_cnt)
1143f931551bSRalph Campbell 			ndesc = 0;
	} else {
		ndesc = 1;
	}
1146f931551bSRalph Campbell 	if (ndesc) {
1147f931551bSRalph Campbell 		phdr = &dev->pio_hdrs[tx->hdr_inx];
1148f931551bSRalph Campbell 		phdr->pbc[0] = cpu_to_le32(plen);
1149f931551bSRalph Campbell 		phdr->pbc[1] = cpu_to_le32(control);
1150f931551bSRalph Campbell 		memcpy(&phdr->hdr, hdr, hdrwords << 2);
1151f931551bSRalph Campbell 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1152f931551bSRalph Campbell 		tx->txreq.sg_count = ndesc;
1153f931551bSRalph Campbell 		tx->txreq.addr = dev->pio_hdrs_phys +
1154f931551bSRalph Campbell 			tx->hdr_inx * sizeof(struct qib_pio_header);
1155f931551bSRalph Campbell 		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1156f931551bSRalph Campbell 		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1157f931551bSRalph Campbell 		goto bail;
1158f931551bSRalph Campbell 	}
1159f931551bSRalph Campbell 
1160f931551bSRalph Campbell 	/* Allocate a buffer and copy the header and payload to it. */
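	/* The whole packet, PBC included, is then sent from this one buffer. */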
1161f931551bSRalph Campbell 	tx->hdr_dwords = plen + 1;
1162f931551bSRalph Campbell 	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1163f931551bSRalph Campbell 	if (!phdr)
1164f931551bSRalph Campbell 		goto err_tx;
1165f931551bSRalph Campbell 	phdr->pbc[0] = cpu_to_le32(plen);
1166f931551bSRalph Campbell 	phdr->pbc[1] = cpu_to_le32(control);
1167f931551bSRalph Campbell 	memcpy(&phdr->hdr, hdr, hdrwords << 2);
1168f931551bSRalph Campbell 	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1169f931551bSRalph Campbell 
1170f931551bSRalph Campbell 	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1171f931551bSRalph Campbell 					tx->hdr_dwords << 2, DMA_TO_DEVICE);
1172f931551bSRalph Campbell 	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1173f931551bSRalph Campbell 		goto map_err;
1174f931551bSRalph Campbell 	tx->align_buf = phdr;
1175f931551bSRalph Campbell 	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1176f931551bSRalph Campbell 	tx->txreq.sg_count = 1;
1177f931551bSRalph Campbell 	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1178f931551bSRalph Campbell 	goto unaligned;
1179f931551bSRalph Campbell 
1180f931551bSRalph Campbell map_err:
1181f931551bSRalph Campbell 	kfree(phdr);
1182f931551bSRalph Campbell err_tx:
1183f931551bSRalph Campbell 	qib_put_txreq(tx);
1184f931551bSRalph Campbell 	ret = wait_kmem(dev, qp);
1185f931551bSRalph Campbell unaligned:
1186f931551bSRalph Campbell 	ibp->n_unaligned++;
1187f931551bSRalph Campbell bail:
1188f931551bSRalph Campbell 	return ret;
1189f931551bSRalph Campbell }
1190f931551bSRalph Campbell 
/*
 * Called when no PIO send buffer is available.  If the QP can still
 * make progress, queue it on the piowait list, enable the PIO avail
 * interrupt, and return -EBUSY so the send is retried later.  If the
 * QP is now in the error state, return zero to flush the send work
 * request.
 */
1195f931551bSRalph Campbell static int no_bufs_available(struct qib_qp *qp)
1196f931551bSRalph Campbell {
1197f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1198f931551bSRalph Campbell 	struct qib_devdata *dd;
1199f931551bSRalph Campbell 	unsigned long flags;
1200f931551bSRalph Campbell 	int ret = 0;
1201f931551bSRalph Campbell 
	/*
	 * Note that as soon as dd->f_wantpiobuf_intr() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called.  Therefore, put the QP on the I/O wait list
	 * before enabling the PIO avail interrupt.
	 */
1208f931551bSRalph Campbell 	spin_lock_irqsave(&qp->s_lock, flags);
1209f931551bSRalph Campbell 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1210f931551bSRalph Campbell 		spin_lock(&dev->pending_lock);
1211f931551bSRalph Campbell 		if (list_empty(&qp->iowait)) {
1212f931551bSRalph Campbell 			dev->n_piowait++;
1213f931551bSRalph Campbell 			qp->s_flags |= QIB_S_WAIT_PIO;
1214f931551bSRalph Campbell 			list_add_tail(&qp->iowait, &dev->piowait);
1215f931551bSRalph Campbell 			dd = dd_from_dev(dev);
1216f931551bSRalph Campbell 			dd->f_wantpiobuf_intr(dd, 1);
1217f931551bSRalph Campbell 		}
1218f931551bSRalph Campbell 		spin_unlock(&dev->pending_lock);
1219f931551bSRalph Campbell 		qp->s_flags &= ~QIB_S_BUSY;
1220f931551bSRalph Campbell 		ret = -EBUSY;
1221f931551bSRalph Campbell 	}
1222f931551bSRalph Campbell 	spin_unlock_irqrestore(&qp->s_lock, flags);
1223f931551bSRalph Campbell 	return ret;
1224f931551bSRalph Campbell }
1225f931551bSRalph Campbell 
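/*
 * Send a packet by programmed I/O: copy the header and payload into a
 * chip send buffer, flushing write-combining as needed so that the
 * trigger word is written last.
 */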
1226f931551bSRalph Campbell static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1227f931551bSRalph Campbell 			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
1228f931551bSRalph Campbell 			      u32 plen, u32 dwords)
1229f931551bSRalph Campbell {
1230f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1231f931551bSRalph Campbell 	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1232f931551bSRalph Campbell 	u32 *hdr = (u32 *) ibhdr;
1233f931551bSRalph Campbell 	u32 __iomem *piobuf_orig;
1234f931551bSRalph Campbell 	u32 __iomem *piobuf;
1235f931551bSRalph Campbell 	u64 pbc;
1236f931551bSRalph Campbell 	unsigned long flags;
1237f931551bSRalph Campbell 	unsigned flush_wc;
1238f931551bSRalph Campbell 	u32 control;
1239f931551bSRalph Campbell 	u32 pbufn;
1240f931551bSRalph Campbell 
1241f931551bSRalph Campbell 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1242f931551bSRalph Campbell 		be16_to_cpu(ibhdr->lrh[0]) >> 12);
1243f931551bSRalph Campbell 	pbc = ((u64) control << 32) | plen;
1244f931551bSRalph Campbell 	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1245f931551bSRalph Campbell 	if (unlikely(piobuf == NULL))
1246f931551bSRalph Campbell 		return no_bufs_available(qp);
1247f931551bSRalph Campbell 
	/*
	 * Write the PBC.
	 * We have to flush after the PBC for correctness on some CPUs,
	 * or the WC buffer can be written out of order.
	 */
1253f931551bSRalph Campbell 	writeq(pbc, piobuf);
1254f931551bSRalph Campbell 	piobuf_orig = piobuf;
1255f931551bSRalph Campbell 	piobuf += 2;
1256f931551bSRalph Campbell 
1257f931551bSRalph Campbell 	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1258f931551bSRalph Campbell 	if (len == 0) {
		/*
		 * If there is just the header portion, we must flush before
		 * writing the last word of the header for correctness, and
		 * again after the last header word (the trigger word).
		 */
1264f931551bSRalph Campbell 		if (flush_wc) {
1265f931551bSRalph Campbell 			qib_flush_wc();
1266f931551bSRalph Campbell 			qib_pio_copy(piobuf, hdr, hdrwords - 1);
1267f931551bSRalph Campbell 			qib_flush_wc();
1268f931551bSRalph Campbell 			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1269f931551bSRalph Campbell 			qib_flush_wc();
		} else {
			qib_pio_copy(piobuf, hdr, hdrwords);
		}
1272f931551bSRalph Campbell 		goto done;
1273f931551bSRalph Campbell 	}
1274f931551bSRalph Campbell 
1275f931551bSRalph Campbell 	if (flush_wc)
1276f931551bSRalph Campbell 		qib_flush_wc();
1277f931551bSRalph Campbell 	qib_pio_copy(piobuf, hdr, hdrwords);
1278f931551bSRalph Campbell 	piobuf += hdrwords;
1279f931551bSRalph Campbell 
1280f931551bSRalph Campbell 	/* The common case is aligned and contained in one segment. */
1281f931551bSRalph Campbell 	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1282f931551bSRalph Campbell 		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1283f931551bSRalph Campbell 		u32 *addr = (u32 *) ss->sge.vaddr;
1284f931551bSRalph Campbell 
1285f931551bSRalph Campbell 		/* Update address before sending packet. */
1286f931551bSRalph Campbell 		update_sge(ss, len);
1287f931551bSRalph Campbell 		if (flush_wc) {
1288f931551bSRalph Campbell 			qib_pio_copy(piobuf, addr, dwords - 1);
1289f931551bSRalph Campbell 			/* must flush early everything before trigger word */
1290f931551bSRalph Campbell 			qib_flush_wc();
1291f931551bSRalph Campbell 			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1292f931551bSRalph Campbell 			/* be sure trigger word is written */
1293f931551bSRalph Campbell 			qib_flush_wc();
		} else {
			qib_pio_copy(piobuf, addr, dwords);
		}
1296f931551bSRalph Campbell 		goto done;
1297f931551bSRalph Campbell 	}
1298f931551bSRalph Campbell 	copy_io(piobuf, ss, len, flush_wc);
1299f931551bSRalph Campbell done:
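	/*
	 * Chips flagged QIB_USE_SPCL_TRIG need a magic write to a
	 * special trigger offset to launch the buffer.
	 */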
1300f931551bSRalph Campbell 	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
1303f931551bSRalph Campbell 		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
1304f931551bSRalph Campbell 	}
1305f931551bSRalph Campbell 	qib_sendbuf_done(dd, pbufn);
1306f931551bSRalph Campbell 	if (qp->s_rdma_mr) {
1307f931551bSRalph Campbell 		atomic_dec(&qp->s_rdma_mr->refcount);
1308f931551bSRalph Campbell 		qp->s_rdma_mr = NULL;
1309f931551bSRalph Campbell 	}
1310f931551bSRalph Campbell 	if (qp->s_wqe) {
1311f931551bSRalph Campbell 		spin_lock_irqsave(&qp->s_lock, flags);
1312f931551bSRalph Campbell 		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1313f931551bSRalph Campbell 		spin_unlock_irqrestore(&qp->s_lock, flags);
1314f931551bSRalph Campbell 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
1315f931551bSRalph Campbell 		spin_lock_irqsave(&qp->s_lock, flags);
1316f931551bSRalph Campbell 		qib_rc_send_complete(qp, ibhdr);
1317f931551bSRalph Campbell 		spin_unlock_irqrestore(&qp->s_lock, flags);
1318f931551bSRalph Campbell 	}
1319f931551bSRalph Campbell 	return 0;
1320f931551bSRalph Campbell }
1321f931551bSRalph Campbell 
1322f931551bSRalph Campbell /**
1323f931551bSRalph Campbell  * qib_verbs_send - send a packet
1324f931551bSRalph Campbell  * @qp: the QP to send on
1325f931551bSRalph Campbell  * @hdr: the packet header
1326f931551bSRalph Campbell  * @hdrwords: the number of 32-bit words in the header
1327f931551bSRalph Campbell  * @ss: the SGE to send
1328f931551bSRalph Campbell  * @len: the length of the packet in bytes
1329f931551bSRalph Campbell  *
 * Return zero if the packet was sent or queued OK.
 * Return non-zero and clear the QIB_S_BUSY bit in qp->s_flags otherwise.
1332f931551bSRalph Campbell  */
1333f931551bSRalph Campbell int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
1334f931551bSRalph Campbell 		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
1335f931551bSRalph Campbell {
1336f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1337f931551bSRalph Campbell 	u32 plen;
1338f931551bSRalph Campbell 	int ret;
1339f931551bSRalph Campbell 	u32 dwords = (len + 3) >> 2;
1340f931551bSRalph Campbell 
1341f931551bSRalph Campbell 	/*
1342f931551bSRalph Campbell 	 * Calculate the send buffer trigger address.
 * The +1 accounts for the PBC control dword that follows the PBC length.
1344f931551bSRalph Campbell 	 */
1345f931551bSRalph Campbell 	plen = hdrwords + dwords + 1;
1346f931551bSRalph Campbell 
1347f931551bSRalph Campbell 	/*
1348f931551bSRalph Campbell 	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1349f931551bSRalph Campbell 	 * can defer SDMA restart until link goes ACTIVE without
1350f931551bSRalph Campbell 	 * worrying about just how we got there.
1351f931551bSRalph Campbell 	 */
1352f931551bSRalph Campbell 	if (qp->ibqp.qp_type == IB_QPT_SMI ||
1353f931551bSRalph Campbell 	    !(dd->flags & QIB_HAS_SEND_DMA))
1354f931551bSRalph Campbell 		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1355f931551bSRalph Campbell 					 plen, dwords);
1356f931551bSRalph Campbell 	else
1357f931551bSRalph Campbell 		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1358f931551bSRalph Campbell 					 plen, dwords);
1359f931551bSRalph Campbell 
1360f931551bSRalph Campbell 	return ret;
1361f931551bSRalph Campbell }
1362f931551bSRalph Campbell 
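/**
 * qib_snapshot_counters - snapshot the send/receive word and packet counters
 * @ppd: the physical port data
 * @swords: send word count is placed here
 * @rwords: receive word count is placed here
 * @spkts: send packet count is placed here
 * @rpkts: receive packet count is placed here
 * @xmit_wait: send stall count is placed here
 *
 * Return 0 on success, -EINVAL if the hardware is not present.
 */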
1363f931551bSRalph Campbell int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1364f931551bSRalph Campbell 			  u64 *rwords, u64 *spkts, u64 *rpkts,
1365f931551bSRalph Campbell 			  u64 *xmit_wait)
1366f931551bSRalph Campbell {
1367f931551bSRalph Campbell 	int ret;
1368f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1369f931551bSRalph Campbell 
1370f931551bSRalph Campbell 	if (!(dd->flags & QIB_PRESENT)) {
1371f931551bSRalph Campbell 		/* no hardware, freeze, etc. */
1372f931551bSRalph Campbell 		ret = -EINVAL;
1373f931551bSRalph Campbell 		goto bail;
1374f931551bSRalph Campbell 	}
1375f931551bSRalph Campbell 	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1376f931551bSRalph Campbell 	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1377f931551bSRalph Campbell 	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1378f931551bSRalph Campbell 	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1379f931551bSRalph Campbell 	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1380f931551bSRalph Campbell 
1381f931551bSRalph Campbell 	ret = 0;
1382f931551bSRalph Campbell 
1383f931551bSRalph Campbell bail:
1384f931551bSRalph Campbell 	return ret;
1385f931551bSRalph Campbell }
1386f931551bSRalph Campbell 
1387f931551bSRalph Campbell /**
1388f931551bSRalph Campbell  * qib_get_counters - get various chip counters
 * @ppd: the physical port data
1390f931551bSRalph Campbell  * @cntrs: counters are placed here
1391f931551bSRalph Campbell  *
1392f931551bSRalph Campbell  * Return the counters needed by recv_pma_get_portcounters().
1393f931551bSRalph Campbell  */
1394f931551bSRalph Campbell int qib_get_counters(struct qib_pportdata *ppd,
1395f931551bSRalph Campbell 		     struct qib_verbs_counters *cntrs)
1396f931551bSRalph Campbell {
1397f931551bSRalph Campbell 	int ret;
1398f931551bSRalph Campbell 
1399f931551bSRalph Campbell 	if (!(ppd->dd->flags & QIB_PRESENT)) {
1400f931551bSRalph Campbell 		/* no hardware, freeze, etc. */
1401f931551bSRalph Campbell 		ret = -EINVAL;
1402f931551bSRalph Campbell 		goto bail;
1403f931551bSRalph Campbell 	}
1404f931551bSRalph Campbell 	cntrs->symbol_error_counter =
1405f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1406f931551bSRalph Campbell 	cntrs->link_error_recovery_counter =
1407f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1408f931551bSRalph Campbell 	/*
1409f931551bSRalph Campbell 	 * The link downed counter counts when the other side downs the
1410f931551bSRalph Campbell 	 * connection.  We add in the number of times we downed the link
1411f931551bSRalph Campbell 	 * due to local link integrity errors to compensate.
1412f931551bSRalph Campbell 	 */
1413f931551bSRalph Campbell 	cntrs->link_downed_counter =
1414f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1415f931551bSRalph Campbell 	cntrs->port_rcv_errors =
1416f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1417f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1418f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1419f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1420f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1421f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1422f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1423f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1424f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1425f931551bSRalph Campbell 	cntrs->port_rcv_errors +=
1426f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1427f931551bSRalph Campbell 	cntrs->port_rcv_errors +=
1428f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1429f931551bSRalph Campbell 	cntrs->port_rcv_remphys_errors =
1430f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1431f931551bSRalph Campbell 	cntrs->port_xmit_discards =
1432f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1433f931551bSRalph Campbell 	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1434f931551bSRalph Campbell 			QIBPORTCNTR_WORDSEND);
1435f931551bSRalph Campbell 	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1436f931551bSRalph Campbell 			QIBPORTCNTR_WORDRCV);
1437f931551bSRalph Campbell 	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1438f931551bSRalph Campbell 			QIBPORTCNTR_PKTSEND);
1439f931551bSRalph Campbell 	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1440f931551bSRalph Campbell 			QIBPORTCNTR_PKTRCV);
1441f931551bSRalph Campbell 	cntrs->local_link_integrity_errors =
1442f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1443f931551bSRalph Campbell 	cntrs->excessive_buffer_overrun_errors =
1444f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1445f931551bSRalph Campbell 	cntrs->vl15_dropped =
1446f931551bSRalph Campbell 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1447f931551bSRalph Campbell 
1448f931551bSRalph Campbell 	ret = 0;
1449f931551bSRalph Campbell 
1450f931551bSRalph Campbell bail:
1451f931551bSRalph Campbell 	return ret;
1452f931551bSRalph Campbell }
1453f931551bSRalph Campbell 
1454f931551bSRalph Campbell /**
1455f931551bSRalph Campbell  * qib_ib_piobufavail - callback when a PIO buffer is available
1456f931551bSRalph Campbell  * @dd: the device pointer
1457f931551bSRalph Campbell  *
1458f931551bSRalph Campbell  * This is called from qib_intr() at interrupt level when a PIO buffer is
1459f931551bSRalph Campbell  * available after qib_verbs_send() returned an error that no buffers were
1460f931551bSRalph Campbell  * available. Disable the interrupt if there are no more QPs waiting.
1461f931551bSRalph Campbell  */
1462f931551bSRalph Campbell void qib_ib_piobufavail(struct qib_devdata *dd)
1463f931551bSRalph Campbell {
1464f931551bSRalph Campbell 	struct qib_ibdev *dev = &dd->verbs_dev;
1465f931551bSRalph Campbell 	struct list_head *list;
1466f931551bSRalph Campbell 	struct qib_qp *qps[5];
1467f931551bSRalph Campbell 	struct qib_qp *qp;
1468f931551bSRalph Campbell 	unsigned long flags;
1469f931551bSRalph Campbell 	unsigned i, n;
1470f931551bSRalph Campbell 
1471f931551bSRalph Campbell 	list = &dev->piowait;
1472f931551bSRalph Campbell 	n = 0;
1473f931551bSRalph Campbell 
1474f931551bSRalph Campbell 	/*
1475f931551bSRalph Campbell 	 * Note: checking that the piowait list is empty and clearing
1476f931551bSRalph Campbell 	 * the buffer available interrupt needs to be atomic or we
1477f931551bSRalph Campbell 	 * could end up with QPs on the wait list with the interrupt
1478f931551bSRalph Campbell 	 * disabled.
1479f931551bSRalph Campbell 	 */
1480f931551bSRalph Campbell 	spin_lock_irqsave(&dev->pending_lock, flags);
1481f931551bSRalph Campbell 	while (!list_empty(list)) {
1482f931551bSRalph Campbell 		if (n == ARRAY_SIZE(qps))
1483f931551bSRalph Campbell 			goto full;
1484f931551bSRalph Campbell 		qp = list_entry(list->next, struct qib_qp, iowait);
1485f931551bSRalph Campbell 		list_del_init(&qp->iowait);
1486f931551bSRalph Campbell 		atomic_inc(&qp->refcount);
1487f931551bSRalph Campbell 		qps[n++] = qp;
1488f931551bSRalph Campbell 	}
1489f931551bSRalph Campbell 	dd->f_wantpiobuf_intr(dd, 0);
1490f931551bSRalph Campbell full:
1491f931551bSRalph Campbell 	spin_unlock_irqrestore(&dev->pending_lock, flags);
1492f931551bSRalph Campbell 
1493f931551bSRalph Campbell 	for (i = 0; i < n; i++) {
1494f931551bSRalph Campbell 		qp = qps[i];
1495f931551bSRalph Campbell 
1496f931551bSRalph Campbell 		spin_lock_irqsave(&qp->s_lock, flags);
1497f931551bSRalph Campbell 		if (qp->s_flags & QIB_S_WAIT_PIO) {
1498f931551bSRalph Campbell 			qp->s_flags &= ~QIB_S_WAIT_PIO;
1499f931551bSRalph Campbell 			qib_schedule_send(qp);
1500f931551bSRalph Campbell 		}
1501f931551bSRalph Campbell 		spin_unlock_irqrestore(&qp->s_lock, flags);
1502f931551bSRalph Campbell 
1503f931551bSRalph Campbell 		/* Notify qib_destroy_qp() if it is waiting. */
1504f931551bSRalph Campbell 		if (atomic_dec_and_test(&qp->refcount))
1505f931551bSRalph Campbell 			wake_up(&qp->wait);
1506f931551bSRalph Campbell 	}
1507f931551bSRalph Campbell }
1508f931551bSRalph Campbell 
1509f931551bSRalph Campbell static int qib_query_device(struct ib_device *ibdev,
1510f931551bSRalph Campbell 			    struct ib_device_attr *props)
1511f931551bSRalph Campbell {
1512f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1513f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(ibdev);
1514f931551bSRalph Campbell 
1515f931551bSRalph Campbell 	memset(props, 0, sizeof(*props));
1516f931551bSRalph Campbell 
1517f931551bSRalph Campbell 	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1518f931551bSRalph Campbell 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1519f931551bSRalph Campbell 		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1520f931551bSRalph Campbell 		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1521f931551bSRalph Campbell 	props->page_size_cap = PAGE_SIZE;
1522f931551bSRalph Campbell 	props->vendor_id =
1523f931551bSRalph Campbell 		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1524f931551bSRalph Campbell 	props->vendor_part_id = dd->deviceid;
1525f931551bSRalph Campbell 	props->hw_ver = dd->minrev;
1526f931551bSRalph Campbell 	props->sys_image_guid = ib_qib_sys_image_guid;
1527f931551bSRalph Campbell 	props->max_mr_size = ~0ULL;
1528f931551bSRalph Campbell 	props->max_qp = ib_qib_max_qps;
1529f931551bSRalph Campbell 	props->max_qp_wr = ib_qib_max_qp_wrs;
1530f931551bSRalph Campbell 	props->max_sge = ib_qib_max_sges;
1531f931551bSRalph Campbell 	props->max_cq = ib_qib_max_cqs;
1532f931551bSRalph Campbell 	props->max_ah = ib_qib_max_ahs;
1533f931551bSRalph Campbell 	props->max_cqe = ib_qib_max_cqes;
1534f931551bSRalph Campbell 	props->max_mr = dev->lk_table.max;
1535f931551bSRalph Campbell 	props->max_fmr = dev->lk_table.max;
1536f931551bSRalph Campbell 	props->max_map_per_fmr = 32767;
1537f931551bSRalph Campbell 	props->max_pd = ib_qib_max_pds;
1538f931551bSRalph Campbell 	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1539f931551bSRalph Campbell 	props->max_qp_init_rd_atom = 255;
1540f931551bSRalph Campbell 	/* props->max_res_rd_atom */
1541f931551bSRalph Campbell 	props->max_srq = ib_qib_max_srqs;
1542f931551bSRalph Campbell 	props->max_srq_wr = ib_qib_max_srq_wrs;
1543f931551bSRalph Campbell 	props->max_srq_sge = ib_qib_max_srq_sges;
1544f931551bSRalph Campbell 	/* props->local_ca_ack_delay */
1545f931551bSRalph Campbell 	props->atomic_cap = IB_ATOMIC_GLOB;
1546f931551bSRalph Campbell 	props->max_pkeys = qib_get_npkeys(dd);
1547f931551bSRalph Campbell 	props->max_mcast_grp = ib_qib_max_mcast_grps;
1548f931551bSRalph Campbell 	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1549f931551bSRalph Campbell 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1550f931551bSRalph Campbell 		props->max_mcast_grp;
1551f931551bSRalph Campbell 
1552f931551bSRalph Campbell 	return 0;
1553f931551bSRalph Campbell }
1554f931551bSRalph Campbell 
1555f931551bSRalph Campbell static int qib_query_port(struct ib_device *ibdev, u8 port,
1556f931551bSRalph Campbell 			  struct ib_port_attr *props)
1557f931551bSRalph Campbell {
1558f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1559f931551bSRalph Campbell 	struct qib_ibport *ibp = to_iport(ibdev, port);
1560f931551bSRalph Campbell 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1561f931551bSRalph Campbell 	enum ib_mtu mtu;
1562f931551bSRalph Campbell 	u16 lid = ppd->lid;
1563f931551bSRalph Campbell 
1564f931551bSRalph Campbell 	memset(props, 0, sizeof(*props));
1565f931551bSRalph Campbell 	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1566f931551bSRalph Campbell 	props->lmc = ppd->lmc;
1567f931551bSRalph Campbell 	props->sm_lid = ibp->sm_lid;
1568f931551bSRalph Campbell 	props->sm_sl = ibp->sm_sl;
1569f931551bSRalph Campbell 	props->state = dd->f_iblink_state(ppd->lastibcstat);
1570f931551bSRalph Campbell 	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1571f931551bSRalph Campbell 	props->port_cap_flags = ibp->port_cap_flags;
1572f931551bSRalph Campbell 	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1573f931551bSRalph Campbell 	props->max_msg_sz = 0x80000000;
1574f931551bSRalph Campbell 	props->pkey_tbl_len = qib_get_npkeys(dd);
1575f931551bSRalph Campbell 	props->bad_pkey_cntr = ibp->pkey_violations;
1576f931551bSRalph Campbell 	props->qkey_viol_cntr = ibp->qkey_violations;
1577f931551bSRalph Campbell 	props->active_width = ppd->link_width_active;
1578f931551bSRalph Campbell 	/* See rate_show() */
1579f931551bSRalph Campbell 	props->active_speed = ppd->link_speed_active;
1580f931551bSRalph Campbell 	props->max_vl_num = qib_num_vls(ppd->vls_supported);
1581f931551bSRalph Campbell 	props->init_type_reply = 0;
1582f931551bSRalph Campbell 
1583f931551bSRalph Campbell 	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1584f931551bSRalph Campbell 	switch (ppd->ibmtu) {
1585f931551bSRalph Campbell 	case 4096:
1586f931551bSRalph Campbell 		mtu = IB_MTU_4096;
1587f931551bSRalph Campbell 		break;
1588f931551bSRalph Campbell 	case 2048:
1589f931551bSRalph Campbell 		mtu = IB_MTU_2048;
1590f931551bSRalph Campbell 		break;
1591f931551bSRalph Campbell 	case 1024:
1592f931551bSRalph Campbell 		mtu = IB_MTU_1024;
1593f931551bSRalph Campbell 		break;
1594f931551bSRalph Campbell 	case 512:
1595f931551bSRalph Campbell 		mtu = IB_MTU_512;
1596f931551bSRalph Campbell 		break;
1597f931551bSRalph Campbell 	case 256:
1598f931551bSRalph Campbell 		mtu = IB_MTU_256;
1599f931551bSRalph Campbell 		break;
1600f931551bSRalph Campbell 	default:
1601f931551bSRalph Campbell 		mtu = IB_MTU_2048;
1602f931551bSRalph Campbell 	}
1603f931551bSRalph Campbell 	props->active_mtu = mtu;
1604f931551bSRalph Campbell 	props->subnet_timeout = ibp->subnet_timeout;
1605f931551bSRalph Campbell 
1606f931551bSRalph Campbell 	return 0;
1607f931551bSRalph Campbell }
1608f931551bSRalph Campbell 
1609f931551bSRalph Campbell static int qib_modify_device(struct ib_device *device,
1610f931551bSRalph Campbell 			     int device_modify_mask,
1611f931551bSRalph Campbell 			     struct ib_device_modify *device_modify)
1612f931551bSRalph Campbell {
1613f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(device);
1614f931551bSRalph Campbell 	unsigned i;
1615f931551bSRalph Campbell 	int ret;
1616f931551bSRalph Campbell 
1617f931551bSRalph Campbell 	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1618f931551bSRalph Campbell 				   IB_DEVICE_MODIFY_NODE_DESC)) {
1619f931551bSRalph Campbell 		ret = -EOPNOTSUPP;
1620f931551bSRalph Campbell 		goto bail;
1621f931551bSRalph Campbell 	}
1622f931551bSRalph Campbell 
1623f931551bSRalph Campbell 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1624f931551bSRalph Campbell 		memcpy(device->node_desc, device_modify->node_desc, 64);
1625f931551bSRalph Campbell 		for (i = 0; i < dd->num_pports; i++) {
1626f931551bSRalph Campbell 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1627f931551bSRalph Campbell 
1628f931551bSRalph Campbell 			qib_node_desc_chg(ibp);
1629f931551bSRalph Campbell 		}
1630f931551bSRalph Campbell 	}
1631f931551bSRalph Campbell 
1632f931551bSRalph Campbell 	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1633f931551bSRalph Campbell 		ib_qib_sys_image_guid =
1634f931551bSRalph Campbell 			cpu_to_be64(device_modify->sys_image_guid);
1635f931551bSRalph Campbell 		for (i = 0; i < dd->num_pports; i++) {
1636f931551bSRalph Campbell 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1637f931551bSRalph Campbell 
1638f931551bSRalph Campbell 			qib_sys_guid_chg(ibp);
1639f931551bSRalph Campbell 		}
1640f931551bSRalph Campbell 	}
1641f931551bSRalph Campbell 
1642f931551bSRalph Campbell 	ret = 0;
1643f931551bSRalph Campbell 
1644f931551bSRalph Campbell bail:
1645f931551bSRalph Campbell 	return ret;
1646f931551bSRalph Campbell }
1647f931551bSRalph Campbell 
1648f931551bSRalph Campbell static int qib_modify_port(struct ib_device *ibdev, u8 port,
1649f931551bSRalph Campbell 			   int port_modify_mask, struct ib_port_modify *props)
1650f931551bSRalph Campbell {
1651f931551bSRalph Campbell 	struct qib_ibport *ibp = to_iport(ibdev, port);
1652f931551bSRalph Campbell 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1653f931551bSRalph Campbell 
1654f931551bSRalph Campbell 	ibp->port_cap_flags |= props->set_port_cap_mask;
1655f931551bSRalph Campbell 	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1656f931551bSRalph Campbell 	if (props->set_port_cap_mask || props->clr_port_cap_mask)
1657f931551bSRalph Campbell 		qib_cap_mask_chg(ibp);
1658f931551bSRalph Campbell 	if (port_modify_mask & IB_PORT_SHUTDOWN)
1659f931551bSRalph Campbell 		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1660f931551bSRalph Campbell 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1661f931551bSRalph Campbell 		ibp->qkey_violations = 0;
1662f931551bSRalph Campbell 	return 0;
1663f931551bSRalph Campbell }
1664f931551bSRalph Campbell 
1665f931551bSRalph Campbell static int qib_query_gid(struct ib_device *ibdev, u8 port,
1666f931551bSRalph Campbell 			 int index, union ib_gid *gid)
1667f931551bSRalph Campbell {
1668f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1669f931551bSRalph Campbell 	int ret = 0;
1670f931551bSRalph Campbell 
	if (!port || port > dd->num_pports) {
		ret = -EINVAL;
	} else {
1674f931551bSRalph Campbell 		struct qib_ibport *ibp = to_iport(ibdev, port);
1675f931551bSRalph Campbell 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1676f931551bSRalph Campbell 
1677f931551bSRalph Campbell 		gid->global.subnet_prefix = ibp->gid_prefix;
1678f931551bSRalph Campbell 		if (index == 0)
1679f931551bSRalph Campbell 			gid->global.interface_id = ppd->guid;
1680f931551bSRalph Campbell 		else if (index < QIB_GUIDS_PER_PORT)
1681f931551bSRalph Campbell 			gid->global.interface_id = ibp->guids[index - 1];
1682f931551bSRalph Campbell 		else
1683f931551bSRalph Campbell 			ret = -EINVAL;
1684f931551bSRalph Campbell 	}
1685f931551bSRalph Campbell 
1686f931551bSRalph Campbell 	return ret;
1687f931551bSRalph Campbell }
1688f931551bSRalph Campbell 
1689f931551bSRalph Campbell static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1690f931551bSRalph Campbell 				  struct ib_ucontext *context,
1691f931551bSRalph Campbell 				  struct ib_udata *udata)
1692f931551bSRalph Campbell {
1693f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(ibdev);
1694f931551bSRalph Campbell 	struct qib_pd *pd;
1695f931551bSRalph Campbell 	struct ib_pd *ret;
1696f931551bSRalph Campbell 
1697f931551bSRalph Campbell 	/*
1698f931551bSRalph Campbell 	 * This is actually totally arbitrary.  Some correctness tests
1699f931551bSRalph Campbell 	 * assume there's a maximum number of PDs that can be allocated.
1700f931551bSRalph Campbell 	 * We don't actually have this limit, but we fail the test if
1701f931551bSRalph Campbell 	 * we allow allocations of more than we report for this value.
1702f931551bSRalph Campbell 	 */
1703f931551bSRalph Campbell 
1704f931551bSRalph Campbell 	pd = kmalloc(sizeof *pd, GFP_KERNEL);
1705f931551bSRalph Campbell 	if (!pd) {
1706f931551bSRalph Campbell 		ret = ERR_PTR(-ENOMEM);
1707f931551bSRalph Campbell 		goto bail;
1708f931551bSRalph Campbell 	}
1709f931551bSRalph Campbell 
1710f931551bSRalph Campbell 	spin_lock(&dev->n_pds_lock);
1711f931551bSRalph Campbell 	if (dev->n_pds_allocated == ib_qib_max_pds) {
1712f931551bSRalph Campbell 		spin_unlock(&dev->n_pds_lock);
1713f931551bSRalph Campbell 		kfree(pd);
1714f931551bSRalph Campbell 		ret = ERR_PTR(-ENOMEM);
1715f931551bSRalph Campbell 		goto bail;
1716f931551bSRalph Campbell 	}
1717f931551bSRalph Campbell 
1718f931551bSRalph Campbell 	dev->n_pds_allocated++;
1719f931551bSRalph Campbell 	spin_unlock(&dev->n_pds_lock);
1720f931551bSRalph Campbell 
1721f931551bSRalph Campbell 	/* ib_alloc_pd() will initialize pd->ibpd. */
1722f931551bSRalph Campbell 	pd->user = udata != NULL;
1723f931551bSRalph Campbell 
1724f931551bSRalph Campbell 	ret = &pd->ibpd;
1725f931551bSRalph Campbell 
1726f931551bSRalph Campbell bail:
1727f931551bSRalph Campbell 	return ret;
1728f931551bSRalph Campbell }
1729f931551bSRalph Campbell 
1730f931551bSRalph Campbell static int qib_dealloc_pd(struct ib_pd *ibpd)
1731f931551bSRalph Campbell {
1732f931551bSRalph Campbell 	struct qib_pd *pd = to_ipd(ibpd);
1733f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(ibpd->device);
1734f931551bSRalph Campbell 
1735f931551bSRalph Campbell 	spin_lock(&dev->n_pds_lock);
1736f931551bSRalph Campbell 	dev->n_pds_allocated--;
1737f931551bSRalph Campbell 	spin_unlock(&dev->n_pds_lock);
1738f931551bSRalph Campbell 
1739f931551bSRalph Campbell 	kfree(pd);
1740f931551bSRalph Campbell 
1741f931551bSRalph Campbell 	return 0;
1742f931551bSRalph Campbell }
1743f931551bSRalph Campbell 
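/**
 * qib_check_ah - validate the attributes of an address handle
 * @ibdev: the infiniband device
 * @ah_attr: the attributes of the AH
 *
 * Return 0 if @ah_attr is valid, -EINVAL otherwise.
 */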
1744f931551bSRalph Campbell int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1745f931551bSRalph Campbell {
1746f931551bSRalph Campbell 	/* A multicast address requires a GRH (see ch. 8.4.1). */
1747f931551bSRalph Campbell 	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
1748f931551bSRalph Campbell 	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
1749f931551bSRalph Campbell 	    !(ah_attr->ah_flags & IB_AH_GRH))
1750f931551bSRalph Campbell 		goto bail;
1751f931551bSRalph Campbell 	if ((ah_attr->ah_flags & IB_AH_GRH) &&
1752f931551bSRalph Campbell 	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
1753f931551bSRalph Campbell 		goto bail;
1754f931551bSRalph Campbell 	if (ah_attr->dlid == 0)
1755f931551bSRalph Campbell 		goto bail;
1756f931551bSRalph Campbell 	if (ah_attr->port_num < 1 ||
1757f931551bSRalph Campbell 	    ah_attr->port_num > ibdev->phys_port_cnt)
1758f931551bSRalph Campbell 		goto bail;
1759f931551bSRalph Campbell 	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1760f931551bSRalph Campbell 	    ib_rate_to_mult(ah_attr->static_rate) < 0)
1761f931551bSRalph Campbell 		goto bail;
1762f931551bSRalph Campbell 	if (ah_attr->sl > 15)
1763f931551bSRalph Campbell 		goto bail;
1764f931551bSRalph Campbell 	return 0;
1765f931551bSRalph Campbell bail:
1766f931551bSRalph Campbell 	return -EINVAL;
1767f931551bSRalph Campbell }
1768f931551bSRalph Campbell 
1769f931551bSRalph Campbell /**
1770f931551bSRalph Campbell  * qib_create_ah - create an address handle
1771f931551bSRalph Campbell  * @pd: the protection domain
1772f931551bSRalph Campbell  * @ah_attr: the attributes of the AH
1773f931551bSRalph Campbell  *
1774f931551bSRalph Campbell  * This may be called from interrupt context.
1775f931551bSRalph Campbell  */
1776f931551bSRalph Campbell static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1777f931551bSRalph Campbell 				   struct ib_ah_attr *ah_attr)
1778f931551bSRalph Campbell {
1779f931551bSRalph Campbell 	struct qib_ah *ah;
1780f931551bSRalph Campbell 	struct ib_ah *ret;
1781f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(pd->device);
1782f931551bSRalph Campbell 	unsigned long flags;
1783f931551bSRalph Campbell 
1784f931551bSRalph Campbell 	if (qib_check_ah(pd->device, ah_attr)) {
1785f931551bSRalph Campbell 		ret = ERR_PTR(-EINVAL);
1786f931551bSRalph Campbell 		goto bail;
1787f931551bSRalph Campbell 	}
1788f931551bSRalph Campbell 
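	/* This may run in interrupt context, so we cannot sleep here. */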
1789f931551bSRalph Campbell 	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1790f931551bSRalph Campbell 	if (!ah) {
1791f931551bSRalph Campbell 		ret = ERR_PTR(-ENOMEM);
1792f931551bSRalph Campbell 		goto bail;
1793f931551bSRalph Campbell 	}
1794f931551bSRalph Campbell 
1795f931551bSRalph Campbell 	spin_lock_irqsave(&dev->n_ahs_lock, flags);
1796f931551bSRalph Campbell 	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
1797f931551bSRalph Campbell 		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1798f931551bSRalph Campbell 		kfree(ah);
1799f931551bSRalph Campbell 		ret = ERR_PTR(-ENOMEM);
1800f931551bSRalph Campbell 		goto bail;
1801f931551bSRalph Campbell 	}
1802f931551bSRalph Campbell 
1803f931551bSRalph Campbell 	dev->n_ahs_allocated++;
1804f931551bSRalph Campbell 	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1805f931551bSRalph Campbell 
1806f931551bSRalph Campbell 	/* ib_create_ah() will initialize ah->ibah. */
1807f931551bSRalph Campbell 	ah->attr = *ah_attr;
1808f931551bSRalph Campbell 	atomic_set(&ah->refcount, 0);
1809f931551bSRalph Campbell 
1810f931551bSRalph Campbell 	ret = &ah->ibah;
1811f931551bSRalph Campbell 
1812f931551bSRalph Campbell bail:
1813f931551bSRalph Campbell 	return ret;
1814f931551bSRalph Campbell }
1815f931551bSRalph Campbell 
1816f931551bSRalph Campbell /**
1817f931551bSRalph Campbell  * qib_destroy_ah - destroy an address handle
1818f931551bSRalph Campbell  * @ibah: the AH to destroy
1819f931551bSRalph Campbell  *
1820f931551bSRalph Campbell  * This may be called from interrupt context.
1821f931551bSRalph Campbell  */
1822f931551bSRalph Campbell static int qib_destroy_ah(struct ib_ah *ibah)
1823f931551bSRalph Campbell {
1824f931551bSRalph Campbell 	struct qib_ibdev *dev = to_idev(ibah->device);
1825f931551bSRalph Campbell 	struct qib_ah *ah = to_iah(ibah);
1826f931551bSRalph Campbell 	unsigned long flags;
1827f931551bSRalph Campbell 
1828f931551bSRalph Campbell 	if (atomic_read(&ah->refcount) != 0)
1829f931551bSRalph Campbell 		return -EBUSY;
1830f931551bSRalph Campbell 
1831f931551bSRalph Campbell 	spin_lock_irqsave(&dev->n_ahs_lock, flags);
1832f931551bSRalph Campbell 	dev->n_ahs_allocated--;
1833f931551bSRalph Campbell 	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1834f931551bSRalph Campbell 
1835f931551bSRalph Campbell 	kfree(ah);
1836f931551bSRalph Campbell 
1837f931551bSRalph Campbell 	return 0;
1838f931551bSRalph Campbell }
1839f931551bSRalph Campbell 
1840f931551bSRalph Campbell static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1841f931551bSRalph Campbell {
1842f931551bSRalph Campbell 	struct qib_ah *ah = to_iah(ibah);
1843f931551bSRalph Campbell 
1844f931551bSRalph Campbell 	if (qib_check_ah(ibah->device, ah_attr))
1845f931551bSRalph Campbell 		return -EINVAL;
1846f931551bSRalph Campbell 
1847f931551bSRalph Campbell 	ah->attr = *ah_attr;
1848f931551bSRalph Campbell 
1849f931551bSRalph Campbell 	return 0;
1850f931551bSRalph Campbell }
1851f931551bSRalph Campbell 
1852f931551bSRalph Campbell static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1853f931551bSRalph Campbell {
1854f931551bSRalph Campbell 	struct qib_ah *ah = to_iah(ibah);
1855f931551bSRalph Campbell 
1856f931551bSRalph Campbell 	*ah_attr = ah->attr;
1857f931551bSRalph Campbell 
1858f931551bSRalph Campbell 	return 0;
1859f931551bSRalph Campbell }
1860f931551bSRalph Campbell 
1861f931551bSRalph Campbell /**
1862f931551bSRalph Campbell  * qib_get_npkeys - return the size of the PKEY table for context 0
1863f931551bSRalph Campbell  * @dd: the qlogic_ib device
1864f931551bSRalph Campbell  */
1865f931551bSRalph Campbell unsigned qib_get_npkeys(struct qib_devdata *dd)
1866f931551bSRalph Campbell {
1867f931551bSRalph Campbell 	return ARRAY_SIZE(dd->rcd[0]->pkeys);
1868f931551bSRalph Campbell }
1869f931551bSRalph Campbell 
1870f931551bSRalph Campbell /*
1871f931551bSRalph Campbell  * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
1873f931551bSRalph Campbell  */
1874f931551bSRalph Campbell unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1875f931551bSRalph Campbell {
1876f931551bSRalph Campbell 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1877f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1878f931551bSRalph Campbell 	unsigned ctxt = ppd->hw_pidx;
1879f931551bSRalph Campbell 	unsigned ret;
1880f931551bSRalph Campbell 
	/* dd->rcd is NULL in the mini_init case or after some init failures */
1882f931551bSRalph Campbell 	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1883f931551bSRalph Campbell 		ret = 0;
1884f931551bSRalph Campbell 	else
1885f931551bSRalph Campbell 		ret = dd->rcd[ctxt]->pkeys[index];
1886f931551bSRalph Campbell 
1887f931551bSRalph Campbell 	return ret;
1888f931551bSRalph Campbell }
1889f931551bSRalph Campbell 
1890f931551bSRalph Campbell static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1891f931551bSRalph Campbell 			  u16 *pkey)
1892f931551bSRalph Campbell {
1893f931551bSRalph Campbell 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1894f931551bSRalph Campbell 	int ret;
1895f931551bSRalph Campbell 
1896f931551bSRalph Campbell 	if (index >= qib_get_npkeys(dd)) {
1897f931551bSRalph Campbell 		ret = -EINVAL;
1898f931551bSRalph Campbell 		goto bail;
1899f931551bSRalph Campbell 	}
1900f931551bSRalph Campbell 
1901f931551bSRalph Campbell 	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
1902f931551bSRalph Campbell 	ret = 0;
1903f931551bSRalph Campbell 
1904f931551bSRalph Campbell bail:
1905f931551bSRalph Campbell 	return ret;
1906f931551bSRalph Campbell }
1907f931551bSRalph Campbell 
1908f931551bSRalph Campbell /**
 * qib_alloc_ucontext - allocate a ucontext
1910f931551bSRalph Campbell  * @ibdev: the infiniband device
1911f931551bSRalph Campbell  * @udata: not used by the QLogic_IB driver
1912f931551bSRalph Campbell  */
1914f931551bSRalph Campbell static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1915f931551bSRalph Campbell 					      struct ib_udata *udata)
1916f931551bSRalph Campbell {
1917f931551bSRalph Campbell 	struct qib_ucontext *context;
1918f931551bSRalph Campbell 	struct ib_ucontext *ret;
1919f931551bSRalph Campbell 
1920f931551bSRalph Campbell 	context = kmalloc(sizeof *context, GFP_KERNEL);
1921f931551bSRalph Campbell 	if (!context) {
1922f931551bSRalph Campbell 		ret = ERR_PTR(-ENOMEM);
1923f931551bSRalph Campbell 		goto bail;
1924f931551bSRalph Campbell 	}
1925f931551bSRalph Campbell 
1926f931551bSRalph Campbell 	ret = &context->ibucontext;
1927f931551bSRalph Campbell 
1928f931551bSRalph Campbell bail:
1929f931551bSRalph Campbell 	return ret;
1930f931551bSRalph Campbell }
1931f931551bSRalph Campbell 
1932f931551bSRalph Campbell static int qib_dealloc_ucontext(struct ib_ucontext *context)
1933f931551bSRalph Campbell {
1934f931551bSRalph Campbell 	kfree(to_iucontext(context));
1935f931551bSRalph Campbell 	return 0;
1936f931551bSRalph Campbell }
1937f931551bSRalph Campbell 
1938f931551bSRalph Campbell static void init_ibport(struct qib_pportdata *ppd)
1939f931551bSRalph Campbell {
1940f931551bSRalph Campbell 	struct qib_verbs_counters cntrs;
1941f931551bSRalph Campbell 	struct qib_ibport *ibp = &ppd->ibport_data;
1942f931551bSRalph Campbell 
1943f931551bSRalph Campbell 	spin_lock_init(&ibp->lock);
1944f931551bSRalph Campbell 	/* Set the prefix to the default value (see ch. 4.1.1) */
1945f931551bSRalph Campbell 	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1946f931551bSRalph Campbell 	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1947f931551bSRalph Campbell 	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1948f931551bSRalph Campbell 		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1949f931551bSRalph Campbell 		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1950f931551bSRalph Campbell 		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1951f931551bSRalph Campbell 		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1952f931551bSRalph Campbell 	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1953f931551bSRalph Campbell 		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1954f931551bSRalph Campbell 	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1955f931551bSRalph Campbell 	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1956f931551bSRalph Campbell 	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1957f931551bSRalph Campbell 	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1958f931551bSRalph Campbell 	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1959f931551bSRalph Campbell 
1960f931551bSRalph Campbell 	/* Snapshot current HW counters to "clear" them. */
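	/* These z_* baselines are subtracted from subsequent readings. */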
1961f931551bSRalph Campbell 	qib_get_counters(ppd, &cntrs);
1962f931551bSRalph Campbell 	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1963f931551bSRalph Campbell 	ibp->z_link_error_recovery_counter =
1964f931551bSRalph Campbell 		cntrs.link_error_recovery_counter;
1965f931551bSRalph Campbell 	ibp->z_link_downed_counter = cntrs.link_downed_counter;
1966f931551bSRalph Campbell 	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1967f931551bSRalph Campbell 	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1968f931551bSRalph Campbell 	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1969f931551bSRalph Campbell 	ibp->z_port_xmit_data = cntrs.port_xmit_data;
1970f931551bSRalph Campbell 	ibp->z_port_rcv_data = cntrs.port_rcv_data;
1971f931551bSRalph Campbell 	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1972f931551bSRalph Campbell 	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1973f931551bSRalph Campbell 	ibp->z_local_link_integrity_errors =
1974f931551bSRalph Campbell 		cntrs.local_link_integrity_errors;
1975f931551bSRalph Campbell 	ibp->z_excessive_buffer_overrun_errors =
1976f931551bSRalph Campbell 		cntrs.excessive_buffer_overrun_errors;
1977f931551bSRalph Campbell 	ibp->z_vl15_dropped = cntrs.vl15_dropped;
1978f931551bSRalph Campbell }
1979f931551bSRalph Campbell 
1980f931551bSRalph Campbell /**
1981f931551bSRalph Campbell  * qib_register_ib_device - register our device with the infiniband core
1982f931551bSRalph Campbell  * @dd: the device data structure
 *
 * Return 0 on success, or a negative errno on failure.
1984f931551bSRalph Campbell  */
1985f931551bSRalph Campbell int qib_register_ib_device(struct qib_devdata *dd)
1986f931551bSRalph Campbell {
1987f931551bSRalph Campbell 	struct qib_ibdev *dev = &dd->verbs_dev;
1988f931551bSRalph Campbell 	struct ib_device *ibdev = &dev->ibdev;
1989f931551bSRalph Campbell 	struct qib_pportdata *ppd = dd->pport;
1990f931551bSRalph Campbell 	unsigned i, lk_tab_size;
1991f931551bSRalph Campbell 	int ret;
1992f931551bSRalph Campbell 
1993f931551bSRalph Campbell 	dev->qp_table_size = ib_qib_qp_table_size;
1994f931551bSRalph Campbell 	dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
1995f931551bSRalph Campbell 				GFP_KERNEL);
1996f931551bSRalph Campbell 	if (!dev->qp_table) {
1997f931551bSRalph Campbell 		ret = -ENOMEM;
1998f931551bSRalph Campbell 		goto err_qpt;
1999f931551bSRalph Campbell 	}
2000f931551bSRalph Campbell 
2001f931551bSRalph Campbell 	for (i = 0; i < dd->num_pports; i++)
2002f931551bSRalph Campbell 		init_ibport(ppd + i);
2003f931551bSRalph Campbell 
2004f931551bSRalph Campbell 	/* Only need to initialize non-zero fields. */
2005f931551bSRalph Campbell 	spin_lock_init(&dev->qpt_lock);
2006f931551bSRalph Campbell 	spin_lock_init(&dev->n_pds_lock);
2007f931551bSRalph Campbell 	spin_lock_init(&dev->n_ahs_lock);
2008f931551bSRalph Campbell 	spin_lock_init(&dev->n_cqs_lock);
2009f931551bSRalph Campbell 	spin_lock_init(&dev->n_qps_lock);
2010f931551bSRalph Campbell 	spin_lock_init(&dev->n_srqs_lock);
2011f931551bSRalph Campbell 	spin_lock_init(&dev->n_mcast_grps_lock);
2012f931551bSRalph Campbell 	init_timer(&dev->mem_timer);
2013f931551bSRalph Campbell 	dev->mem_timer.function = mem_timer;
2014f931551bSRalph Campbell 	dev->mem_timer.data = (unsigned long) dev;
2015f931551bSRalph Campbell 
2016f931551bSRalph Campbell 	qib_init_qpn_table(dd, &dev->qpn_table);
2017f931551bSRalph Campbell 
2018f931551bSRalph Campbell 	/*
2019f931551bSRalph Campbell 	 * The top ib_qib_lkey_table_size bits are used to index the
2020f931551bSRalph Campbell 	 * table.  The lower 8 bits can be owned by the user (copied from
2021f931551bSRalph Campbell 	 * the LKEY).  The remaining bits act as a generation number or tag.
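	 * With the default lkey_table_size of 16 and a 32-bit key, for
	 * example, bits 31..16 index the table, bits 7..0 are user owned,
	 * and bits 15..8 act as the generation tag.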
2022f931551bSRalph Campbell 	 */
2023f931551bSRalph Campbell 	spin_lock_init(&dev->lk_table.lock);
2024f931551bSRalph Campbell 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
2025f931551bSRalph Campbell 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2026f931551bSRalph Campbell 	dev->lk_table.table = (struct qib_mregion **)
2027f931551bSRalph Campbell 		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
2028f931551bSRalph Campbell 	if (dev->lk_table.table == NULL) {
2029f931551bSRalph Campbell 		ret = -ENOMEM;
2030f931551bSRalph Campbell 		goto err_lk;
2031f931551bSRalph Campbell 	}
2032f931551bSRalph Campbell 	memset(dev->lk_table.table, 0, lk_tab_size);
2033f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->pending_mmaps);
2034f931551bSRalph Campbell 	spin_lock_init(&dev->pending_lock);
2035f931551bSRalph Campbell 	dev->mmap_offset = PAGE_SIZE;
2036f931551bSRalph Campbell 	spin_lock_init(&dev->mmap_offset_lock);
2037f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->piowait);
2038f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->dmawait);
2039f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->txwait);
2040f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->memwait);
2041f931551bSRalph Campbell 	INIT_LIST_HEAD(&dev->txreq_free);
2042f931551bSRalph Campbell 
2043f931551bSRalph Campbell 	if (ppd->sdma_descq_cnt) {
2044f931551bSRalph Campbell 		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2045f931551bSRalph Campbell 						ppd->sdma_descq_cnt *
2046f931551bSRalph Campbell 						sizeof(struct qib_pio_header),
2047f931551bSRalph Campbell 						&dev->pio_hdrs_phys,
2048f931551bSRalph Campbell 						GFP_KERNEL);
2049f931551bSRalph Campbell 		if (!dev->pio_hdrs) {
2050f931551bSRalph Campbell 			ret = -ENOMEM;
2051f931551bSRalph Campbell 			goto err_hdrs;
2052f931551bSRalph Campbell 		}
2053f931551bSRalph Campbell 	}
2054f931551bSRalph Campbell 
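	/* Pre-allocate one verbs txreq per SDMA descriptor queue entry. */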
2055f931551bSRalph Campbell 	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2056f931551bSRalph Campbell 		struct qib_verbs_txreq *tx;
2057f931551bSRalph Campbell 
2058f931551bSRalph Campbell 		tx = kzalloc(sizeof *tx, GFP_KERNEL);
2059f931551bSRalph Campbell 		if (!tx) {
2060f931551bSRalph Campbell 			ret = -ENOMEM;
2061f931551bSRalph Campbell 			goto err_tx;
2062f931551bSRalph Campbell 		}
2063f931551bSRalph Campbell 		tx->hdr_inx = i;
2064f931551bSRalph Campbell 		list_add(&tx->txreq.list, &dev->txreq_free);
2065f931551bSRalph Campbell 	}
2066f931551bSRalph Campbell 
2067f931551bSRalph Campbell 	/*
2068f931551bSRalph Campbell 	 * The system image GUID is supposed to be the same for all
2069f931551bSRalph Campbell 	 * IB HCAs in a single system but since there can be other
2070f931551bSRalph Campbell 	 * device types in the system, we can't be sure this is unique.
2071f931551bSRalph Campbell 	 */
2072f931551bSRalph Campbell 	if (!ib_qib_sys_image_guid)
2073f931551bSRalph Campbell 		ib_qib_sys_image_guid = ppd->guid;
2074f931551bSRalph Campbell 
2075f931551bSRalph Campbell 	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2076f931551bSRalph Campbell 	ibdev->owner = THIS_MODULE;
2077f931551bSRalph Campbell 	ibdev->node_guid = ppd->guid;
2078f931551bSRalph Campbell 	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2079f931551bSRalph Campbell 	ibdev->uverbs_cmd_mask =
2080f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2081f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2082f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2083f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2084f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2085f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
2086f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_MODIFY_AH)           |
2087f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
2088f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
2089f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
2090f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2091f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2092f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2093f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2094f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2095f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
2096f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
2097f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2098f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2099f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2100f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2101f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_POST_SEND)           |
2102f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_POST_RECV)           |
2103f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2104f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2105f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2106f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2107f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2108f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2109f931551bSRalph Campbell 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
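	/*
	 * The mask above advertises which userspace verbs commands this
	 * device implements; the uverbs layer rejects any command whose
	 * bit is clear before it reaches the driver.
	 */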
2110f931551bSRalph Campbell 	ibdev->node_type = RDMA_NODE_IB_CA;
2111f931551bSRalph Campbell 	ibdev->phys_port_cnt = dd->num_pports;
2112f931551bSRalph Campbell 	ibdev->num_comp_vectors = 1;
2113f931551bSRalph Campbell 	ibdev->dma_device = &dd->pcidev->dev;
2114f931551bSRalph Campbell 	ibdev->query_device = qib_query_device;
2115f931551bSRalph Campbell 	ibdev->modify_device = qib_modify_device;
2116f931551bSRalph Campbell 	ibdev->query_port = qib_query_port;
2117f931551bSRalph Campbell 	ibdev->modify_port = qib_modify_port;
2118f931551bSRalph Campbell 	ibdev->query_pkey = qib_query_pkey;
2119f931551bSRalph Campbell 	ibdev->query_gid = qib_query_gid;
2120f931551bSRalph Campbell 	ibdev->alloc_ucontext = qib_alloc_ucontext;
2121f931551bSRalph Campbell 	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
2122f931551bSRalph Campbell 	ibdev->alloc_pd = qib_alloc_pd;
2123f931551bSRalph Campbell 	ibdev->dealloc_pd = qib_dealloc_pd;
2124f931551bSRalph Campbell 	ibdev->create_ah = qib_create_ah;
2125f931551bSRalph Campbell 	ibdev->destroy_ah = qib_destroy_ah;
2126f931551bSRalph Campbell 	ibdev->modify_ah = qib_modify_ah;
2127f931551bSRalph Campbell 	ibdev->query_ah = qib_query_ah;
2128f931551bSRalph Campbell 	ibdev->create_srq = qib_create_srq;
2129f931551bSRalph Campbell 	ibdev->modify_srq = qib_modify_srq;
2130f931551bSRalph Campbell 	ibdev->query_srq = qib_query_srq;
2131f931551bSRalph Campbell 	ibdev->destroy_srq = qib_destroy_srq;
2132f931551bSRalph Campbell 	ibdev->create_qp = qib_create_qp;
2133f931551bSRalph Campbell 	ibdev->modify_qp = qib_modify_qp;
2134f931551bSRalph Campbell 	ibdev->query_qp = qib_query_qp;
2135f931551bSRalph Campbell 	ibdev->destroy_qp = qib_destroy_qp;
2136f931551bSRalph Campbell 	ibdev->post_send = qib_post_send;
2137f931551bSRalph Campbell 	ibdev->post_recv = qib_post_receive;
2138f931551bSRalph Campbell 	ibdev->post_srq_recv = qib_post_srq_receive;
2139f931551bSRalph Campbell 	ibdev->create_cq = qib_create_cq;
2140f931551bSRalph Campbell 	ibdev->destroy_cq = qib_destroy_cq;
2141f931551bSRalph Campbell 	ibdev->resize_cq = qib_resize_cq;
2142f931551bSRalph Campbell 	ibdev->poll_cq = qib_poll_cq;
2143f931551bSRalph Campbell 	ibdev->req_notify_cq = qib_req_notify_cq;
2144f931551bSRalph Campbell 	ibdev->get_dma_mr = qib_get_dma_mr;
2145f931551bSRalph Campbell 	ibdev->reg_phys_mr = qib_reg_phys_mr;
2146f931551bSRalph Campbell 	ibdev->reg_user_mr = qib_reg_user_mr;
2147f931551bSRalph Campbell 	ibdev->dereg_mr = qib_dereg_mr;
2148f931551bSRalph Campbell 	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
2149f931551bSRalph Campbell 	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
2150f931551bSRalph Campbell 	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
2151f931551bSRalph Campbell 	ibdev->alloc_fmr = qib_alloc_fmr;
2152f931551bSRalph Campbell 	ibdev->map_phys_fmr = qib_map_phys_fmr;
2153f931551bSRalph Campbell 	ibdev->unmap_fmr = qib_unmap_fmr;
2154f931551bSRalph Campbell 	ibdev->dealloc_fmr = qib_dealloc_fmr;
2155f931551bSRalph Campbell 	ibdev->attach_mcast = qib_multicast_attach;
2156f931551bSRalph Campbell 	ibdev->detach_mcast = qib_multicast_detach;
2157f931551bSRalph Campbell 	ibdev->process_mad = qib_process_mad;
2158f931551bSRalph Campbell 	ibdev->mmap = qib_mmap;
2159f931551bSRalph Campbell 	ibdev->dma_ops = &qib_dma_mapping_ops;
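	/*
	 * dma_ops points the core ib_dma_* helpers at driver-supplied
	 * routines (qib_dma_mapping_ops) rather than the generic DMA API.
	 */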
2160f931551bSRalph Campbell 
2161f931551bSRalph Campbell 	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2162f931551bSRalph Campbell 		 QIB_IDSTR " %s", init_utsname()->nodename);
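	/*
	 * node_desc, built above from the hostname, is the NodeDescription
	 * that subnet managers and fabric diagnostics display for this node.
	 */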
2163f931551bSRalph Campbell 
2164f931551bSRalph Campbell 	ret = ib_register_device(ibdev, qib_create_port_files);
2165f931551bSRalph Campbell 	if (ret)
2166f931551bSRalph Campbell 		goto err_reg;
2167f931551bSRalph Campbell 
2168f931551bSRalph Campbell 	ret = qib_create_agents(dev);
2169f931551bSRalph Campbell 	if (ret)
2170f931551bSRalph Campbell 		goto err_agents;
2171f931551bSRalph Campbell 
2172f931551bSRalph Campbell 	ret = qib_verbs_register_sysfs(dd);
2172f931551bSRalph Campbell 	if (ret)
2173f931551bSRalph Campbell 		goto err_class;
2174f931551bSRalph Campbell 
2175f931551bSRalph Campbell 	goto bail;
2176f931551bSRalph Campbell 
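	/*
	 * Error unwind: each label below undoes one setup step and falls
	 * through to the next, in reverse order of initialization.
	 */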
2177f931551bSRalph Campbell err_class:
2178f931551bSRalph Campbell 	qib_free_agents(dev);
2179f931551bSRalph Campbell err_agents:
2180f931551bSRalph Campbell 	ib_unregister_device(ibdev);
2181f931551bSRalph Campbell err_reg:
2182f931551bSRalph Campbell err_tx:
2183f931551bSRalph Campbell 	while (!list_empty(&dev->txreq_free)) {
2184f931551bSRalph Campbell 		struct list_head *l = dev->txreq_free.next;
2185f931551bSRalph Campbell 		struct qib_verbs_txreq *tx;
2186f931551bSRalph Campbell 
2187f931551bSRalph Campbell 		list_del(l);
2188f931551bSRalph Campbell 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2189f931551bSRalph Campbell 		kfree(tx);
2190f931551bSRalph Campbell 	}
2191f931551bSRalph Campbell 	if (ppd->sdma_descq_cnt)
2192f931551bSRalph Campbell 		dma_free_coherent(&dd->pcidev->dev,
2193f931551bSRalph Campbell 				  ppd->sdma_descq_cnt *
2194f931551bSRalph Campbell 					sizeof(struct qib_pio_header),
2195f931551bSRalph Campbell 				  dev->pio_hdrs, dev->pio_hdrs_phys);
2196f931551bSRalph Campbell err_hdrs:
2197f931551bSRalph Campbell 	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
2198f931551bSRalph Campbell err_lk:
2199f931551bSRalph Campbell 	kfree(dev->qp_table);
2200f931551bSRalph Campbell err_qpt:
2201f931551bSRalph Campbell 	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2202f931551bSRalph Campbell bail:
2203f931551bSRalph Campbell 	return ret;
2204f931551bSRalph Campbell }
2205f931551bSRalph Campbell 
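/**
 * qib_unregister_ib_device - unregister the verbs device
 * @dd: the qlogic_ib device
 *
 * Undo everything done by qib_register_ib_device(): detach from the
 * IB core, check for leaked verbs objects, and free the driver state.
 */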
2206f931551bSRalph Campbell void qib_unregister_ib_device(struct qib_devdata *dd)
2207f931551bSRalph Campbell {
2208f931551bSRalph Campbell 	struct qib_ibdev *dev = &dd->verbs_dev;
2209f931551bSRalph Campbell 	struct ib_device *ibdev = &dev->ibdev;
2210f931551bSRalph Campbell 	u32 qps_inuse;
2211f931551bSRalph Campbell 	unsigned lk_tab_size;
2212f931551bSRalph Campbell 	unsigned int lk_tab_size;
2213f931551bSRalph Campbell 	qib_verbs_unregister_sysfs(dd);
2214f931551bSRalph Campbell 
2215f931551bSRalph Campbell 	qib_free_agents(dev);
2216f931551bSRalph Campbell 
2217f931551bSRalph Campbell 	ib_unregister_device(ibdev);
2218f931551bSRalph Campbell 
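	/*
	 * With the device unregistered, no QP can still be queued waiting
	 * for PIO buffers, SDMA, tx requests, or memory; a non-empty list
	 * here indicates a leak.
	 */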
2219f931551bSRalph Campbell 	if (!list_empty(&dev->piowait))
2220f931551bSRalph Campbell 		qib_dev_err(dd, "piowait list not empty!\n");
2221f931551bSRalph Campbell 	if (!list_empty(&dev->dmawait))
2222f931551bSRalph Campbell 		qib_dev_err(dd, "dmawait list not empty!\n");
2223f931551bSRalph Campbell 	if (!list_empty(&dev->txwait))
2224f931551bSRalph Campbell 		qib_dev_err(dd, "txwait list not empty!\n");
2225f931551bSRalph Campbell 	if (!list_empty(&dev->memwait))
2226f931551bSRalph Campbell 		qib_dev_err(dd, "memwait list not empty!\n");
2227f931551bSRalph Campbell 	if (dev->dma_mr)
2228f931551bSRalph Campbell 		qib_dev_err(dd, "DMA MR not NULL!\n");
2229f931551bSRalph Campbell 
2230f931551bSRalph Campbell 	qps_inuse = qib_free_all_qps(dd);
2231f931551bSRalph Campbell 	if (qps_inuse)
2232f931551bSRalph Campbell 		qib_dev_err(dd, "QP memory leak! %u still in use\n",
2233f931551bSRalph Campbell 			    qps_inuse);
2234f931551bSRalph Campbell 
2235f931551bSRalph Campbell 	del_timer_sync(&dev->mem_timer);
2236f931551bSRalph Campbell 	qib_free_qpn_table(&dev->qpn_table);
2237f931551bSRalph Campbell 	while (!list_empty(&dev->txreq_free)) {
2238f931551bSRalph Campbell 		struct list_head *l = dev->txreq_free.next;
2239f931551bSRalph Campbell 		struct qib_verbs_txreq *tx;
2240f931551bSRalph Campbell 
2241f931551bSRalph Campbell 		list_del(l);
2242f931551bSRalph Campbell 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2243f931551bSRalph Campbell 		kfree(tx);
2244f931551bSRalph Campbell 	}
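	/*
	 * The remaining frees mirror the error path of
	 * qib_register_ib_device(): PIO headers, then the lkey table,
	 * then the QP table.
	 */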
2245f931551bSRalph Campbell 	if (dd->pport->sdma_descq_cnt)
2246f931551bSRalph Campbell 		dma_free_coherent(&dd->pcidev->dev,
2247f931551bSRalph Campbell 				  dd->pport->sdma_descq_cnt *
2248f931551bSRalph Campbell 					sizeof(struct qib_pio_header),
2249f931551bSRalph Campbell 				  dev->pio_hdrs, dev->pio_hdrs_phys);
2250f931551bSRalph Campbell 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2251f931551bSRalph Campbell 	free_pages((unsigned long) dev->lk_table.table,
2252f931551bSRalph Campbell 		   get_order(lk_tab_size));
2253f931551bSRalph Campbell 	kfree(dev->qp_table);
2254f931551bSRalph Campbell }
2255