xref: /openbmc/linux/drivers/infiniband/core/verbs.c (revision 79f08d9e)
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
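
/*
 * Usage sketch: ib_rate_to_mult() and mult_to_ib_rate() are inverses over
 * the rates they both know about, mapping between IB_RATE_* values and
 * multiples of the 2.5 Gbps (SDR 1x) base rate.  Anything else falls
 * through to -1 or IB_RATE_PORT_CURRENT:
 *
 *	ib_rate_to_mult(IB_RATE_20_GBPS)  == 8
 *	mult_to_ib_rate(8)                == IB_RATE_20_GBPS
 *	ib_rate_to_mult(IB_RATE_14_GBPS)  == -1  (not a multiple of 2.5 Gbps)
 */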

int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
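
/*
 * Usage sketch: a ULP that must behave differently on an Ethernet link
 * (RoCE, iWARP, usNIC) versus native InfiniBand can key off the link
 * layer rather than the node type.  The helpers named here are
 * hypothetical:
 *
 *	if (rdma_port_get_link_layer(device, port_num) ==
 *	    IB_LINK_LAYER_ETHERNET)
 *		use_ethernet_path();
 *	else
 *		use_infiniband_path();
 */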

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
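
/*
 * Usage sketch: like the other allocators in this file, ib_alloc_pd()
 * returns an ERR_PTR() on failure, never NULL, so callers must check
 * with IS_ERR():
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);	(returns -EBUSY while usecnt is nonzero)
 */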

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
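
/*
 * Note: the apparent field swap above is intentional.  In a received
 * GRH, sgid is the sender's GID and dgid is one of our own GIDs, so a
 * reply address handle uses grh->sgid as its destination GID and looks
 * up the local GID index from grh->dgid.  The hop limit is pinned to
 * 0xFF so the reply can reach any sender.
 */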

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
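
/*
 * Usage sketch: a UD responder can build a reply address handle straight
 * from a received completion.  Assuming wc and grh came from polling a
 * UD QP's receive queue:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post sends using ah, then ib_destroy_ah(ah) ...
 */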

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
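
/*
 * Usage sketch: creating a basic (non-XRC) SRQ.  The queue sizes are
 * illustrative, not mandated:
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr     = { .max_wr = 128, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */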

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

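/*
 * An XRC target QP may be opened (shared) by several consumers.  The
 * handler below fans a single hardware event out to every open instance
 * on the real QP's open_list, under the device's event_handler_lock.
 */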
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
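
/*
 * Usage sketch: a consumer that knows the QP number of an existing XRC
 * target QP on an XRCD attaches to it instead of creating its own.
 * my_qp_event_handler and my_ctx are hypothetical:
 *
 *	struct ib_qp_open_attr attr = {
 *		.event_handler = my_qp_event_handler,
 *		.qp_context    = my_ctx,
 *		.qp_num        = tgt_qp_num,
 *		.qp_type       = IB_QPT_XRC_TGT,
 *	};
 *	struct ib_qp *qp = ib_open_qp(xrcd, &attr);
 *
 * The opened QP is later released with ib_close_qp(), which drops the
 * reference on the real QP without destroying it.
 */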

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
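
/*
 * Usage sketch: creating an ordinary RC QP.  The capacities are
 * illustrative only:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */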

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
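
/*
 * Example: for the INIT -> RTR transition of an RC QP the table requires
 * IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN, IB_QP_RQ_PSN,
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER, so
 *
 *	mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
 *	       IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
 *	ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC, mask)
 *
 * returns 1, while omitting a required bit or setting one outside the
 * required and optional sets makes it return 0.
 */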

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
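
/*
 * Usage sketch: allocating a CQ and arming it for completion events.
 * my_comp_handler and my_ctx are hypothetical:
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 256, 0);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */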

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
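
/*
 * Usage sketch: a kernel ULP that DMA-maps its buffers typically
 * registers a single DMA MR per PD and uses its lkey in every local
 * scatter/gather entry:
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	sge.lkey = mr->lkey;
 */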

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
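
/*
 * Note: the unmap is driven through the device of the first FMR on the
 * list, so every FMR passed in a single ib_unmap_fmr() call is expected
 * to belong to the same device.
 */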

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
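
/*
 * Note: the gid->raw[0] != 0xff checks above reject GIDs outside the
 * IBA multicast prefix, and only UD QPs may join multicast groups.
 * A join/leave pair looks like:
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */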

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
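
/*
 * Usage sketch: attaching a flow steering rule to a QP.  Building the
 * ib_flow_spec_* entries that follow the ib_flow_attr header is omitted;
 * this shows only the call shape:
 *
 *	struct ib_flow *flow = ib_create_flow(qp, flow_attr,
 *					      IB_FLOW_DOMAIN_USER);
 *
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	...
 *	ib_destroy_flow(flow);
 */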

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
1304