/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
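
/*
 * Usage sketch (illustrative only): the multiplier is expressed in units
 * of the 2.5 Gb/s base link rate, so a 10 Gb/s link maps to 4 and back:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);	// mult == 4
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// rate == IB_RATE_10_GBPS
 */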

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
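
/*
 * Usage sketch (illustrative only; "device" is a hypothetical
 * struct ib_device pointer, e.g. from a client's add() callback).  Like
 * the other allocators in this file, ib_alloc_pd() returns an ERR_PTR()
 * value rather than NULL on failure:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	... create AHs, QPs, CQs, MRs against pd ...
 *
 *	ib_dealloc_pd(pd);	// returns -EBUSY while objects remain
 */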

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	/* Reverse the path: the remote's source LID is our destination. */
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		/* Likewise, the incoming GRH's SGID is the reply's DGID. */
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		/* Use the maximum hop limit so the reply can reach the sender. */
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
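
/*
 * Usage sketch (illustrative only; "pd", "wc", "recv_buf" and "port_num"
 * are hypothetical).  A UD responder can build an address handle straight
 * from a received completion; for UD QPs the 40-byte GRH, when present,
 * sits at the start of the receive buffer:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc,
 *						(struct ib_grh *) recv_buf,
 *						port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post sends using ah, then ...
 *	ib_destroy_ah(ah);
 */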

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
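
/*
 * Usage sketch (illustrative only; the handler, context and capacities
 * are hypothetical):
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.event_handler = my_srq_event_handler,
 *		.srq_context   = my_ctx,
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */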

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
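
/*
 * Usage sketch (illustrative only; the CQs and capacities are
 * hypothetical):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */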

/*
 * qp_state_table[cur_state][next_state] describes every legal QP state
 * transition: .valid says whether the transition is allowed at all, and
 * .req_param/.opt_param give, per QP type, the attribute mask bits that
 * are required and optional for that transition.
 */
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

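/*
 * ib_modify_qp_is_ok - check a proposed transition against the table
 * above.  Returns 1 if the transition from cur_state to next_state is
 * valid for a QP of the given type and the mask carries all required
 * bits and no unsupported ones; returns 0 otherwise.
 */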
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
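
/*
 * Usage sketch (illustrative only): a caller that tracks QP states can
 * validate a transition before issuing it.  "cur", "attr" and "mask" are
 * hypothetical, and mask must include IB_QP_STATE:
 *
 *	if (!ib_modify_qp_is_ok(cur, attr.qp_state, qp->qp_type, mask))
 *		return -EINVAL;
 *
 *	ret = ib_modify_qp(qp, &attr, mask);
 */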

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
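
/*
 * Usage sketch (illustrative only; handler and context names are
 * hypothetical).  comp_vector selects which completion interrupt vector
 * the CQ uses on devices that support more than one:
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 256, 0);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */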

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
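
/*
 * Usage sketch (illustrative only): a kernel client typically takes one
 * DMA MR per PD for local access:
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 *	... use mr->lkey in posted work requests, then ...
 *	ib_dereg_mr(mr);
 */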

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
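
/*
 * Usage sketch (illustrative only; "dma_addr" and the buffer it names
 * are hypothetical):
 *
 *	struct ib_phys_buf buf = {
 *		.addr = dma_addr,	// bus address of the region
 *		.size = PAGE_SIZE,
 *	};
 *	u64 iova = dma_addr;
 *	struct ib_mr *mr = ib_reg_phys_mr(pd, &buf, 1,
 *					  IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_READ, &iova);
 */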

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
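
/*
 * Usage sketch (illustrative only; "qp" and "mw_bind" are hypothetical,
 * and binding is assumed to go through ib_bind_mw() from
 * <rdma/ib_verbs.h>): a window is bound to part of a memory region on a
 * QP before its rkey is handed to a peer:
 *
 *	struct ib_mw *mw = ib_alloc_mw(pd);
 *
 *	if (IS_ERR(mw))
 *		return PTR_ERR(mw);
 *	... ib_bind_mw(qp, mw, &mw_bind) ...
 *	ib_dealloc_mw(mw);
 */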

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
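
/*
 * Usage sketch (illustrative only; the attribute values are
 * hypothetical, and mapping is assumed to go through ib_map_phys_fmr()
 * from <rdma/ib_verbs.h>).  FMRs are unmapped in batches via the list
 * hook embedded in struct ib_fmr:
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE,
 *					  &fmr_attr);
 *
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	... map, use, then unmap and free ...
 *	LIST_HEAD(fmr_list);
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */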

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	/* Only UD QPs may join, and only to a multicast GID (0xff prefix). */
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
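
/*
 * Usage sketch (illustrative only; "mgid" and "mlid" would normally come
 * from an SA multicast group join):
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	... receive multicast traffic on qp, then ...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */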
864