/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <ib_verbs.h>

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device);

	if (!IS_ERR(pd)) {
		pd->device = device;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
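/*
 * Note on reference counting in the destroy/dealloc verbs below: each one
 * caches the parent object (e.g. the PD) before calling into the provider,
 * and drops the parent's usecnt only if the provider call succeeds.  This
 * keeps the -EBUSY checks in ib_dealloc_pd() and ib_destroy_cq() accurate
 * even when a low-level driver fails to destroy a child object.
 */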
int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq,
		 int cqe)
{
	int ret;

	if (!cq->device->resize_cq)
		return -ENOSYS;

	ret = cq->device->resize_cq(cq, &cqe);
	if (!ret)
		cq->cqe = cqe;

	return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
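/*
 * Physical memory region registration.  The mid-layer only fills in the
 * bookkeeping fields (device, pd) and bumps the PD's usecnt; translating
 * the phys_buf_array and honouring iova_start are left entirely to the
 * low-level driver's reg_phys_mr method.
 */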
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device = pd->device;
		mw->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	return qp->device->attach_mcast ?
		qp->device->attach_mcast(qp, gid, lid) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);
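/*
 * ib_detach_mcast() mirrors ib_attach_mcast(): the gid/lid pair is passed
 * straight through to the provider, and devices without multicast support
 * return -ENOSYS.
 */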
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	return qp->device->detach_mcast ?
		qp->device->detach_mcast(qp, gid, lid) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);
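/*
 * Consumer-side sketch (illustrative only; fields and error handling
 * omitted, names such as init_attr/ctx are placeholders).  Resources are
 * created top-down and, because of the usecnt tracking above, must be
 * destroyed bottom-up:
 *
 *	pd = ib_alloc_pd(device);
 *	cq = ib_create_cq(device, comp_handler, NULL, ctx, 256);
 *	init_attr.send_cq = init_attr.recv_cq = cq;
 *	qp = ib_create_qp(pd, &init_attr);
 *	...
 *	ib_destroy_qp(qp);
 *	ib_destroy_cq(cq);
 *	ib_dealloc_pd(pd);
 */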