/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
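/*
 * Illustrative sketch (not part of this file): a consumer's async event
 * handler might use ib_event_msg() purely for logging.  The handler name
 * and pr_info() format below are assumptions for the example only.
 *
 *	static void example_event_handler(struct ib_event_handler *handler,
 *					  struct ib_event *event)
 *	{
 *		pr_info("async event %s on device %s\n",
 *			ib_event_msg(event->event), event->device->name);
 *	}
 */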
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
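/*
 * Worked example (illustrative only): ib_rate_to_mult() expresses a static
 * rate as a multiple of the 2.5 Gb/s base rate, so
 *
 *	ib_rate_to_mult(IB_RATE_20_GBPS) == 8      (8 * 2.5 Gb/s = 20 Gb/s)
 *	mult_to_ib_rate(8)               == IB_RATE_20_GBPS
 *	ib_rate_to_mbps(IB_RATE_25_GBPS) == 25781  (25.78125 Gb/s signalling)
 *
 * Rates that are not a whole multiple of 2.5 Gb/s (14, 25, 56, ... Gb/s)
 * are only representable through ib_rate_to_mbps(); ib_rate_to_mult()
 * returns -1 for them.
 */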
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_device_attr devattr;
	int rc;

	rc = ib_query_device(device, &devattr);
	if (rc)
		return ERR_PTR(rc);

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
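/*
 * Illustrative sketch, not a definitive consumer: a kernel ULP typically
 * allocates one PD per device and uses pd->local_dma_lkey as the lkey of
 * its local SGEs.  The variable names are made up for the example.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	sge.lkey = pd->local_dma_lkey;	// valid for local memory access
 *	...
 *	ib_dealloc_pd(pd);	// only after all QPs/AHs/SRQs/MRs on it are gone
 */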
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/*
	 * uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	/*
	 * Making dealloc_pd a void return is a WIP, no driver should return
	 * an error here.
	 */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
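/*
 * Illustrative sketch, assuming a UD service that wants to reply to the
 * sender of a received datagram.  "recv_buf" is an assumption of the
 * example, not a requirement of this API.
 *
 *	// wc is the receive completion; for UD QPs the first 40 bytes of the
 *	// receive buffer hold the GRH (meaningful only when
 *	// wc->wc_flags & IB_WC_GRH is set).
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 *	// post sends using ah in the UD work request, then:
 *	ib_destroy_ah(ah);
 */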
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
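/*
 * Illustrative sketch, assuming a basic (non-XRC) SRQ shared by several QPs.
 * The wr/sge numbers are placeholders and must respect the device limits
 * reported by ib_query_device(); the callback name is assumed.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.event_handler	= my_srq_event_handler,
 *		.srq_context	= my_ctx,
 *		.attr = {
 *			.max_wr	   = 256,
 *			.max_sge   = 1,
 *			.srq_limit = 0,
 *		},
 *		.srq_type	= IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	// pass srq in ib_qp_init_attr.srq when creating QPs, then later:
 *	ib_destroy_srq(srq);
 */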
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
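/*
 * Illustrative sketch, assuming an RC QP driven by separate send and receive
 * CQs.  The capability numbers are placeholders and must respect the device
 * maxima from ib_query_device(); the callback name is assumed.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler	= my_qp_event_handler,
 *		.qp_context	= my_ctx,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	// drive it through RESET->INIT->RTR->RTS with ib_modify_qp() ...
 */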
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]	 = (IB_QP_SMAC),
				[IB_QPT_UC]	 = (IB_QP_SMAC),
				[IB_QPT_XRC_INI] = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
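/*
 * Worked example (illustrative only): for an RC QP on an InfiniBand link,
 * the table above makes the INIT -> RTR transition valid only when the
 * attribute mask carries at least the required parameters, e.g.
 *
 *	mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
 *	       IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
 *
 *	ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC, mask,
 *			   IB_LINK_LAYER_INFINIBAND);	// returns 1
 *
 * Dropping any required bit, or adding one that is neither required nor
 * optional for this transition, makes the call return 0.
 */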
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;
	union ib_gid sgid;

	if ((*qp_attr_mask & IB_QP_AV) &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);


int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
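/*
 * Illustrative sketch, assuming a consumer that polls a single CQ from its
 * completion handler.  The callback names and the depth of 256 are
 * assumptions of the example.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe	     = 256,	// minimum number of entries wanted
 *		.comp_vector = 0,
 *	};
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler,
 *					my_cq_event_handler, my_ctx, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	// arm before waiting
 *	...
 *	ib_destroy_cq(cq);
 */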
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
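/*
 * Illustrative sketch, assuming a fast-registration MR that will later be
 * registered through a work request.  max_num_sg (32 here, arbitrarily)
 * bounds how many pages or SG entries a single registration may cover.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// build and post the registration work request, use mr->lkey/rkey,
 *	// invalidate, and finally:
 *	ib_dereg_mr(mr);
 */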
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
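/*
 * Illustrative sketch, assuming a UD QP joining a multicast group whose GID
 * and LID were obtained from the SA (e.g. via ib_sa_join_multicast()).  The
 * GID must be a multicast GID (raw[0] == 0xff) and the QP must be IB_QPT_UD,
 * or the calls below return -EINVAL.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	// receive datagrams sent to the group, then later:
 *	ib_detach_mcast(qp, &mgid, mlid);
 */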
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);