/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
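
/*
 * Usage sketch (illustrative only, not part of this file): a kernel ULP
 * typically allocates one PD per device it binds to and frees it only
 * after every dependent resource has been destroyed, since
 * ib_dealloc_pd() returns -EBUSY while pd->usecnt is nonzero.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	WARN_ON(ib_dealloc_pd(pd));
 */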

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
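
/*
 * Usage sketch (illustrative; "recv_buf" stands for a receive buffer whose
 * first bytes hold the GRH when one was delivered): a UD responder can
 * build an address handle back to the sender straight from the receive
 * completion, in the style of the MAD and CM consumers of this API.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (const struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	ib_destroy_ah(ah);
 */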

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
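
/*
 * Usage sketch (illustrative; "pd", "cq" and the queue sizes are
 * placeholders chosen by the caller): a connected RC QP is normally
 * created with both work queues mapped to existing CQs and then driven
 * through the RESET->INIT->RTR->RTS transitions validated by
 * ib_modify_qp_is_ok() below.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */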

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_SMAC),
				[IB_QPT_UC]  = (IB_QP_SMAC),
				[IB_QPT_XRC_INI] = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
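
/*
 * Example (sketch): a driver's modify_qp path can validate a RESET->INIT
 * transition on an RC QP over an InfiniBand link like this; the mask
 * below carries exactly the attributes qp_state_table requires for that
 * transition, plus IB_QP_STATE itself.
 *
 *	int ok = ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				    IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				    IB_QP_PORT | IB_QP_ACCESS_FLAGS,
 *				    IB_LINK_LAYER_INFINIBAND);
 *
 * "ok" is 1 here; dropping a required bit, or adding one that is neither
 * required nor optional for the transition, makes it 0.
 */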

int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;
	union ib_gid sgid;

	if ((*qp_attr_mask & IB_QP_AV) &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
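
/*
 * Usage sketch (illustrative; "my_comp_handler" and "my_ctx" are
 * caller-supplied placeholders): a typical consumer creates a CQ sized
 * for its queue depth, arms it, and drains completions from the handler
 * with ib_poll_cq()/ib_req_notify_cq() declared in <rdma/ib_verbs.h>.
 *
 *	struct ib_cq_init_attr attr = { .cqe = 64, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, &attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */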

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr)
{
	struct ib_mr *mr;

	if (!pd->device->create_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->create_mr(pd, mr_init_attr);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_create_mr);

int ib_destroy_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->destroy_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);