1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #include <linux/errno.h> 40 #include <linux/err.h> 41 #include <linux/export.h> 42 #include <linux/string.h> 43 #include <linux/slab.h> 44 #include <linux/in.h> 45 #include <linux/in6.h> 46 #include <net/addrconf.h> 47 #include <linux/security.h> 48 49 #include <rdma/ib_verbs.h> 50 #include <rdma/ib_cache.h> 51 #include <rdma/ib_addr.h> 52 #include <rdma/rw.h> 53 #include <rdma/lag.h> 54 55 #include "core_priv.h" 56 #include <trace/events/rdma_core.h> 57 58 static int ib_resolve_eth_dmac(struct ib_device *device, 59 struct rdma_ah_attr *ah_attr); 60 61 static const char * const ib_events[] = { 62 [IB_EVENT_CQ_ERR] = "CQ error", 63 [IB_EVENT_QP_FATAL] = "QP fatal error", 64 [IB_EVENT_QP_REQ_ERR] = "QP request error", 65 [IB_EVENT_QP_ACCESS_ERR] = "QP access error", 66 [IB_EVENT_COMM_EST] = "communication established", 67 [IB_EVENT_SQ_DRAINED] = "send queue drained", 68 [IB_EVENT_PATH_MIG] = "path migration successful", 69 [IB_EVENT_PATH_MIG_ERR] = "path migration error", 70 [IB_EVENT_DEVICE_FATAL] = "device fatal error", 71 [IB_EVENT_PORT_ACTIVE] = "port active", 72 [IB_EVENT_PORT_ERR] = "port error", 73 [IB_EVENT_LID_CHANGE] = "LID change", 74 [IB_EVENT_PKEY_CHANGE] = "P_key change", 75 [IB_EVENT_SM_CHANGE] = "SM change", 76 [IB_EVENT_SRQ_ERR] = "SRQ error", 77 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", 78 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", 79 [IB_EVENT_CLIENT_REREGISTER] = "client reregister", 80 [IB_EVENT_GID_CHANGE] = "GID changed", 81 }; 82 83 const char *__attribute_const__ ib_event_msg(enum ib_event_type event) 84 { 85 size_t index = event; 86 87 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
88 ib_events[index] : "unrecognized event"; 89 } 90 EXPORT_SYMBOL(ib_event_msg); 91 92 static const char * const wc_statuses[] = { 93 [IB_WC_SUCCESS] = "success", 94 [IB_WC_LOC_LEN_ERR] = "local length error", 95 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", 96 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", 97 [IB_WC_LOC_PROT_ERR] = "local protection error", 98 [IB_WC_WR_FLUSH_ERR] = "WR flushed", 99 [IB_WC_MW_BIND_ERR] = "memory management operation error", 100 [IB_WC_BAD_RESP_ERR] = "bad response error", 101 [IB_WC_LOC_ACCESS_ERR] = "local access error", 102 [IB_WC_REM_INV_REQ_ERR] = "invalid request error", 103 [IB_WC_REM_ACCESS_ERR] = "remote access error", 104 [IB_WC_REM_OP_ERR] = "remote operation error", 105 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", 106 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", 107 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", 108 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", 109 [IB_WC_REM_ABORT_ERR] = "operation aborted", 110 [IB_WC_INV_EECN_ERR] = "invalid EE context number", 111 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", 112 [IB_WC_FATAL_ERR] = "fatal error", 113 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", 114 [IB_WC_GENERAL_ERR] = "general error", 115 }; 116 117 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) 118 { 119 size_t index = status; 120 121 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? 122 wc_statuses[index] : "unrecognized status"; 123 } 124 EXPORT_SYMBOL(ib_wc_status_msg); 125 126 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) 127 { 128 switch (rate) { 129 case IB_RATE_2_5_GBPS: return 1; 130 case IB_RATE_5_GBPS: return 2; 131 case IB_RATE_10_GBPS: return 4; 132 case IB_RATE_20_GBPS: return 8; 133 case IB_RATE_30_GBPS: return 12; 134 case IB_RATE_40_GBPS: return 16; 135 case IB_RATE_60_GBPS: return 24; 136 case IB_RATE_80_GBPS: return 32; 137 case IB_RATE_120_GBPS: return 48; 138 case IB_RATE_14_GBPS: return 6; 139 case IB_RATE_56_GBPS: return 22; 140 case IB_RATE_112_GBPS: return 45; 141 case IB_RATE_168_GBPS: return 67; 142 case IB_RATE_25_GBPS: return 10; 143 case IB_RATE_100_GBPS: return 40; 144 case IB_RATE_200_GBPS: return 80; 145 case IB_RATE_300_GBPS: return 120; 146 case IB_RATE_28_GBPS: return 11; 147 case IB_RATE_50_GBPS: return 20; 148 case IB_RATE_400_GBPS: return 160; 149 case IB_RATE_600_GBPS: return 240; 150 default: return -1; 151 } 152 } 153 EXPORT_SYMBOL(ib_rate_to_mult); 154 155 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) 156 { 157 switch (mult) { 158 case 1: return IB_RATE_2_5_GBPS; 159 case 2: return IB_RATE_5_GBPS; 160 case 4: return IB_RATE_10_GBPS; 161 case 8: return IB_RATE_20_GBPS; 162 case 12: return IB_RATE_30_GBPS; 163 case 16: return IB_RATE_40_GBPS; 164 case 24: return IB_RATE_60_GBPS; 165 case 32: return IB_RATE_80_GBPS; 166 case 48: return IB_RATE_120_GBPS; 167 case 6: return IB_RATE_14_GBPS; 168 case 22: return IB_RATE_56_GBPS; 169 case 45: return IB_RATE_112_GBPS; 170 case 67: return IB_RATE_168_GBPS; 171 case 10: return IB_RATE_25_GBPS; 172 case 40: return IB_RATE_100_GBPS; 173 case 80: return IB_RATE_200_GBPS; 174 case 120: return IB_RATE_300_GBPS; 175 case 11: return IB_RATE_28_GBPS; 176 case 20: return IB_RATE_50_GBPS; 177 case 160: return IB_RATE_400_GBPS; 178 case 240: return IB_RATE_600_GBPS; 179 default: return IB_RATE_PORT_CURRENT; 180 } 181 } 182 EXPORT_SYMBOL(mult_to_ib_rate); 183 184 __attribute_const__ int ib_rate_to_mbps(enum ib_rate 
rate) 185 { 186 switch (rate) { 187 case IB_RATE_2_5_GBPS: return 2500; 188 case IB_RATE_5_GBPS: return 5000; 189 case IB_RATE_10_GBPS: return 10000; 190 case IB_RATE_20_GBPS: return 20000; 191 case IB_RATE_30_GBPS: return 30000; 192 case IB_RATE_40_GBPS: return 40000; 193 case IB_RATE_60_GBPS: return 60000; 194 case IB_RATE_80_GBPS: return 80000; 195 case IB_RATE_120_GBPS: return 120000; 196 case IB_RATE_14_GBPS: return 14062; 197 case IB_RATE_56_GBPS: return 56250; 198 case IB_RATE_112_GBPS: return 112500; 199 case IB_RATE_168_GBPS: return 168750; 200 case IB_RATE_25_GBPS: return 25781; 201 case IB_RATE_100_GBPS: return 103125; 202 case IB_RATE_200_GBPS: return 206250; 203 case IB_RATE_300_GBPS: return 309375; 204 case IB_RATE_28_GBPS: return 28125; 205 case IB_RATE_50_GBPS: return 53125; 206 case IB_RATE_400_GBPS: return 425000; 207 case IB_RATE_600_GBPS: return 637500; 208 default: return -1; 209 } 210 } 211 EXPORT_SYMBOL(ib_rate_to_mbps); 212 213 __attribute_const__ enum rdma_transport_type 214 rdma_node_get_transport(unsigned int node_type) 215 { 216 217 if (node_type == RDMA_NODE_USNIC) 218 return RDMA_TRANSPORT_USNIC; 219 if (node_type == RDMA_NODE_USNIC_UDP) 220 return RDMA_TRANSPORT_USNIC_UDP; 221 if (node_type == RDMA_NODE_RNIC) 222 return RDMA_TRANSPORT_IWARP; 223 if (node_type == RDMA_NODE_UNSPECIFIED) 224 return RDMA_TRANSPORT_UNSPECIFIED; 225 226 return RDMA_TRANSPORT_IB; 227 } 228 EXPORT_SYMBOL(rdma_node_get_transport); 229 230 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 231 { 232 enum rdma_transport_type lt; 233 if (device->ops.get_link_layer) 234 return device->ops.get_link_layer(device, port_num); 235 236 lt = rdma_node_get_transport(device->node_type); 237 if (lt == RDMA_TRANSPORT_IB) 238 return IB_LINK_LAYER_INFINIBAND; 239 240 return IB_LINK_LAYER_ETHERNET; 241 } 242 EXPORT_SYMBOL(rdma_port_get_link_layer); 243 244 /* Protection domains */ 245 246 /** 247 * ib_alloc_pd - Allocates an unused protection domain. 248 * @device: The device on which to allocate the protection domain. 249 * @flags: protection domain flags 250 * @caller: caller's build-time module name 251 * 252 * A protection domain object provides an association between QPs, shared 253 * receive queues, address handles, memory regions, and memory windows. 254 * 255 * Every PD has a local_dma_lkey which can be used as the lkey value for local 256 * memory operations. 
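 *
 * A minimal kernel-side usage sketch (illustrative only; assumes "dev" is a
 * valid ib_device obtained from an ib_client add callback):
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(dev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... use pd->local_dma_lkey in local SGEs ...
 *	ib_dealloc_pd(pd);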
257 */ 258 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 259 const char *caller) 260 { 261 struct ib_pd *pd; 262 int mr_access_flags = 0; 263 int ret; 264 265 pd = rdma_zalloc_drv_obj(device, ib_pd); 266 if (!pd) 267 return ERR_PTR(-ENOMEM); 268 269 pd->device = device; 270 pd->uobject = NULL; 271 pd->__internal_mr = NULL; 272 atomic_set(&pd->usecnt, 0); 273 pd->flags = flags; 274 275 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); 276 rdma_restrack_set_name(&pd->res, caller); 277 278 ret = device->ops.alloc_pd(pd, NULL); 279 if (ret) { 280 rdma_restrack_put(&pd->res); 281 kfree(pd); 282 return ERR_PTR(ret); 283 } 284 rdma_restrack_add(&pd->res); 285 286 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 287 pd->local_dma_lkey = device->local_dma_lkey; 288 else 289 mr_access_flags |= IB_ACCESS_LOCAL_WRITE; 290 291 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 292 pr_warn("%s: enabling unsafe global rkey\n", caller); 293 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; 294 } 295 296 if (mr_access_flags) { 297 struct ib_mr *mr; 298 299 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); 300 if (IS_ERR(mr)) { 301 ib_dealloc_pd(pd); 302 return ERR_CAST(mr); 303 } 304 305 mr->device = pd->device; 306 mr->pd = pd; 307 mr->type = IB_MR_TYPE_DMA; 308 mr->uobject = NULL; 309 mr->need_inval = false; 310 311 pd->__internal_mr = mr; 312 313 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) 314 pd->local_dma_lkey = pd->__internal_mr->lkey; 315 316 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) 317 pd->unsafe_global_rkey = pd->__internal_mr->rkey; 318 } 319 320 return pd; 321 } 322 EXPORT_SYMBOL(__ib_alloc_pd); 323 324 /** 325 * ib_dealloc_pd_user - Deallocates a protection domain. 326 * @pd: The protection domain to deallocate. 327 * @udata: Valid user data or NULL for kernel object 328 * 329 * It is an error to call this function while any resources in the pd still 330 * exist. The caller is responsible to synchronously destroy them and 331 * guarantee no new allocations will happen. 332 */ 333 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) 334 { 335 int ret; 336 337 if (pd->__internal_mr) { 338 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); 339 WARN_ON(ret); 340 pd->__internal_mr = NULL; 341 } 342 343 /* uverbs manipulates usecnt with proper locking, while the kabi 344 requires the caller to guarantee we can't race here. */ 345 WARN_ON(atomic_read(&pd->usecnt)); 346 347 ret = pd->device->ops.dealloc_pd(pd, udata); 348 if (ret) 349 return ret; 350 351 rdma_restrack_del(&pd->res); 352 kfree(pd); 353 return ret; 354 } 355 EXPORT_SYMBOL(ib_dealloc_pd_user); 356 357 /* Address handles */ 358 359 /** 360 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination. 361 * @dest: Pointer to destination ah_attr. Contents of the destination 362 * pointer is assumed to be invalid and attribute are overwritten. 363 * @src: Pointer to source ah_attr. 364 */ 365 void rdma_copy_ah_attr(struct rdma_ah_attr *dest, 366 const struct rdma_ah_attr *src) 367 { 368 *dest = *src; 369 if (dest->grh.sgid_attr) 370 rdma_hold_gid_attr(dest->grh.sgid_attr); 371 } 372 EXPORT_SYMBOL(rdma_copy_ah_attr); 373 374 /** 375 * rdma_replace_ah_attr - Replace valid ah_attr with new new one. 376 * @old: Pointer to existing ah_attr which needs to be replaced. 377 * old is assumed to be valid or zero'd 378 * @new: Pointer to the new ah_attr. 
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * the reference to the replaced ah_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
                          const struct rdma_ah_attr *new)
{
        rdma_destroy_ah_attr(old);
        *old = *new;
        if (old->grh.sgid_attr)
                rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);

/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest: Pointer to destination ah_attr to copy to.
 *        dest is assumed to be valid or zero'd
 * @src: Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. This also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
        rdma_destroy_ah_attr(dest);
        *dest = *src;
        src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);

/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
                              struct rdma_ah_attr *ah_attr)
{
        if (!rdma_is_port_valid(device, ah_attr->port_num))
                return -EINVAL;

        if ((rdma_is_grh_required(device, ah_attr->port_num) ||
             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
            !(ah_attr->ah_flags & IB_AH_GRH))
                return -EINVAL;

        if (ah_attr->grh.sgid_attr) {
                /*
                 * Make sure the passed sgid_attr is consistent with the
                 * parameters
                 */
                if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
                    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
                        return -EINVAL;
        }
        return 0;
}

/*
 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr,
                               const struct ib_gid_attr **old_sgid_attr)
{
        const struct ib_gid_attr *sgid_attr;
        struct ib_global_route *grh;
        int ret;

        *old_sgid_attr = ah_attr->grh.sgid_attr;

        ret = rdma_check_ah_attr(device, ah_attr);
        if (ret)
                return ret;

        if (!(ah_attr->ah_flags & IB_AH_GRH))
                return 0;

        grh = rdma_ah_retrieve_grh(ah_attr);
        if (grh->sgid_attr)
                return 0;

        sgid_attr =
                rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
        if (IS_ERR(sgid_attr))
                return PTR_ERR(sgid_attr);

        /* Move ownership of the kref into the ah_attr */
        grh->sgid_attr = sgid_attr;
        return 0;
}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
                                  const struct ib_gid_attr *old_sgid_attr)
{
        /*
         * Fill didn't change anything, the caller retains ownership of
         * whatever it passed
         */
        if (ah_attr->grh.sgid_attr == old_sgid_attr)
                return;

        /*
         * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
         * caller doesn't see any change in the rdma_ah_attr. If we get here
         * old_sgid_attr is NULL.
489 */ 490 rdma_destroy_ah_attr(ah_attr); 491 } 492 493 static const struct ib_gid_attr * 494 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr, 495 const struct ib_gid_attr *old_attr) 496 { 497 if (old_attr) 498 rdma_put_gid_attr(old_attr); 499 if (ah_attr->ah_flags & IB_AH_GRH) { 500 rdma_hold_gid_attr(ah_attr->grh.sgid_attr); 501 return ah_attr->grh.sgid_attr; 502 } 503 return NULL; 504 } 505 506 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, 507 struct rdma_ah_attr *ah_attr, 508 u32 flags, 509 struct ib_udata *udata, 510 struct net_device *xmit_slave) 511 { 512 struct rdma_ah_init_attr init_attr = {}; 513 struct ib_device *device = pd->device; 514 struct ib_ah *ah; 515 int ret; 516 517 might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE); 518 519 if (!device->ops.create_ah) 520 return ERR_PTR(-EOPNOTSUPP); 521 522 ah = rdma_zalloc_drv_obj_gfp( 523 device, ib_ah, 524 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC); 525 if (!ah) 526 return ERR_PTR(-ENOMEM); 527 528 ah->device = device; 529 ah->pd = pd; 530 ah->type = ah_attr->type; 531 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); 532 init_attr.ah_attr = ah_attr; 533 init_attr.flags = flags; 534 init_attr.xmit_slave = xmit_slave; 535 536 ret = device->ops.create_ah(ah, &init_attr, udata); 537 if (ret) { 538 kfree(ah); 539 return ERR_PTR(ret); 540 } 541 542 atomic_inc(&pd->usecnt); 543 return ah; 544 } 545 546 /** 547 * rdma_create_ah - Creates an address handle for the 548 * given address vector. 549 * @pd: The protection domain associated with the address handle. 550 * @ah_attr: The attributes of the address vector. 551 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 552 * 553 * It returns 0 on success and returns appropriate error code on error. 554 * The address handle is used to reference a local or global destination 555 * in all UD QP post sends. 556 */ 557 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 558 u32 flags) 559 { 560 const struct ib_gid_attr *old_sgid_attr; 561 struct net_device *slave; 562 struct ib_ah *ah; 563 int ret; 564 565 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); 566 if (ret) 567 return ERR_PTR(ret); 568 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, 569 (flags & RDMA_CREATE_AH_SLEEPABLE) ? 570 GFP_KERNEL : GFP_ATOMIC); 571 if (IS_ERR(slave)) { 572 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); 573 return (void *)slave; 574 } 575 ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave); 576 rdma_lag_put_ah_roce_slave(slave); 577 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); 578 return ah; 579 } 580 EXPORT_SYMBOL(rdma_create_ah); 581 582 /** 583 * rdma_create_user_ah - Creates an address handle for the 584 * given address vector. 585 * It resolves destination mac address for ah attribute of RoCE type. 586 * @pd: The protection domain associated with the address handle. 587 * @ah_attr: The attributes of the address vector. 588 * @udata: pointer to user's input output buffer information need by 589 * provider driver. 590 * 591 * It returns 0 on success and returns appropriate error code on error. 592 * The address handle is used to reference a local or global destination 593 * in all UD QP post sends. 
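 *
 * For comparison, a kernel consumer goes through rdma_create_ah() with no
 * udata; a minimal sketch (illustrative only, the dlid/sl/port values are
 * assumptions):
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(pd->device, 1);
 *	rdma_ah_set_dlid(&attr, 0x1234);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, 1);
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);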
594 */ 595 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 596 struct rdma_ah_attr *ah_attr, 597 struct ib_udata *udata) 598 { 599 const struct ib_gid_attr *old_sgid_attr; 600 struct ib_ah *ah; 601 int err; 602 603 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); 604 if (err) 605 return ERR_PTR(err); 606 607 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { 608 err = ib_resolve_eth_dmac(pd->device, ah_attr); 609 if (err) { 610 ah = ERR_PTR(err); 611 goto out; 612 } 613 } 614 615 ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, 616 udata, NULL); 617 618 out: 619 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); 620 return ah; 621 } 622 EXPORT_SYMBOL(rdma_create_user_ah); 623 624 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr) 625 { 626 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; 627 struct iphdr ip4h_checked; 628 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; 629 630 /* If it's IPv6, the version must be 6, otherwise, the first 631 * 20 bytes (before the IPv4 header) are garbled. 632 */ 633 if (ip6h->version != 6) 634 return (ip4h->version == 4) ? 4 : 0; 635 /* version may be 6 or 4 because the first 20 bytes could be garbled */ 636 637 /* RoCE v2 requires no options, thus header length 638 * must be 5 words 639 */ 640 if (ip4h->ihl != 5) 641 return 6; 642 643 /* Verify checksum. 644 * We can't write on scattered buffers so we need to copy to 645 * temp buffer. 646 */ 647 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); 648 ip4h_checked.check = 0; 649 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5); 650 /* if IPv4 header checksum is OK, believe it */ 651 if (ip4h->check == ip4h_checked.check) 652 return 4; 653 return 6; 654 } 655 EXPORT_SYMBOL(ib_get_rdma_header_version); 656 657 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, 658 u8 port_num, 659 const struct ib_grh *grh) 660 { 661 int grh_version; 662 663 if (rdma_protocol_ib(device, port_num)) 664 return RDMA_NETWORK_IB; 665 666 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh); 667 668 if (grh_version == 4) 669 return RDMA_NETWORK_IPV4; 670 671 if (grh->next_hdr == IPPROTO_UDP) 672 return RDMA_NETWORK_IPV6; 673 674 return RDMA_NETWORK_ROCE_V1; 675 } 676 677 struct find_gid_index_context { 678 u16 vlan_id; 679 enum ib_gid_type gid_type; 680 }; 681 682 static bool find_gid_index(const union ib_gid *gid, 683 const struct ib_gid_attr *gid_attr, 684 void *context) 685 { 686 struct find_gid_index_context *ctx = context; 687 u16 vlan_id = 0xffff; 688 int ret; 689 690 if (ctx->gid_type != gid_attr->gid_type) 691 return false; 692 693 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); 694 if (ret) 695 return false; 696 697 return ctx->vlan_id == vlan_id; 698 } 699 700 static const struct ib_gid_attr * 701 get_sgid_attr_from_eth(struct ib_device *device, u8 port_num, 702 u16 vlan_id, const union ib_gid *sgid, 703 enum ib_gid_type gid_type) 704 { 705 struct find_gid_index_context context = {.vlan_id = vlan_id, 706 .gid_type = gid_type}; 707 708 return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index, 709 &context); 710 } 711 712 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 713 enum rdma_network_type net_type, 714 union ib_gid *sgid, union ib_gid *dgid) 715 { 716 struct sockaddr_in src_in; 717 struct sockaddr_in dst_in; 718 __be32 src_saddr, dst_saddr; 719 720 if (!sgid || !dgid) 721 return -EINVAL; 722 723 if (net_type == RDMA_NETWORK_IPV4) { 724 
                memcpy(&src_in.sin_addr.s_addr,
                       &hdr->roce4grh.saddr, 4);
                memcpy(&dst_in.sin_addr.s_addr,
                       &hdr->roce4grh.daddr, 4);
                src_saddr = src_in.sin_addr.s_addr;
                dst_saddr = dst_in.sin_addr.s_addr;
                ipv6_addr_set_v4mapped(src_saddr,
                                       (struct in6_addr *)sgid);
                ipv6_addr_set_v4mapped(dst_saddr,
                                       (struct in6_addr *)dgid);
                return 0;
        } else if (net_type == RDMA_NETWORK_IPV6 ||
                   net_type == RDMA_NETWORK_IB ||
                   net_type == RDMA_NETWORK_ROCE_V1) {
                *dgid = hdr->ibgrh.dgid;
                *sgid = hdr->ibgrh.sgid;
                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * The ah_attr must have valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
                                       struct rdma_ah_attr *ah_attr)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
        const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
        int hop_limit = 0xff;
        int ret = 0;

        /* If destination is link local and source GID is RoCEv1,
         * IP stack is not used.
         */
        if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
            sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
                rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
                                ah_attr->roce.dmac);
                return ret;
        }

        ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
                                           ah_attr->roce.dmac,
                                           sgid_attr, &hop_limit);

        grh->hop_limit = hop_limit;
        return ret;
}

/*
 * This function initializes address handle attributes from the incoming
 * packet. The incoming packet's dgid is the GID of the receiver node on which
 * this code is executing, and its sgid contains the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is used
 * as sgid and sgid is used as dgid, because sgid contains the destination's
 * GID to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr() on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
                            const struct ib_wc *wc, const struct ib_grh *grh,
                            struct rdma_ah_attr *ah_attr)
{
        u32 flow_class;
        int ret;
        enum rdma_network_type net_type = RDMA_NETWORK_IB;
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        const struct ib_gid_attr *sgid_attr;
        int hoplimit = 0xff;
        union ib_gid dgid;
        union ib_gid sgid;

        might_sleep();

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->type = rdma_ah_find_type(device, port_num);
        if (rdma_cap_eth_ah(device, port_num)) {
                if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
                        net_type = wc->network_hdr_type;
                else
                        net_type = ib_get_net_type_by_grh(device, port_num, grh);
                gid_type = ib_network_to_gid_type(net_type);
        }
        ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                        &sgid, &dgid);
        if (ret)
                return ret;

        rdma_ah_set_sl(ah_attr, wc->sl);
        rdma_ah_set_port_num(ah_attr, port_num);

        if (rdma_protocol_roce(device, port_num)) {
                u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
822 wc->vlan_id : 0xffff; 823 824 if (!(wc->wc_flags & IB_WC_GRH)) 825 return -EPROTOTYPE; 826 827 sgid_attr = get_sgid_attr_from_eth(device, port_num, 828 vlan_id, &dgid, 829 gid_type); 830 if (IS_ERR(sgid_attr)) 831 return PTR_ERR(sgid_attr); 832 833 flow_class = be32_to_cpu(grh->version_tclass_flow); 834 rdma_move_grh_sgid_attr(ah_attr, 835 &sgid, 836 flow_class & 0xFFFFF, 837 hoplimit, 838 (flow_class >> 20) & 0xFF, 839 sgid_attr); 840 841 ret = ib_resolve_unicast_gid_dmac(device, ah_attr); 842 if (ret) 843 rdma_destroy_ah_attr(ah_attr); 844 845 return ret; 846 } else { 847 rdma_ah_set_dlid(ah_attr, wc->slid); 848 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits); 849 850 if ((wc->wc_flags & IB_WC_GRH) == 0) 851 return 0; 852 853 if (dgid.global.interface_id != 854 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { 855 sgid_attr = rdma_find_gid_by_port( 856 device, &dgid, IB_GID_TYPE_IB, port_num, NULL); 857 } else 858 sgid_attr = rdma_get_gid_attr(device, port_num, 0); 859 860 if (IS_ERR(sgid_attr)) 861 return PTR_ERR(sgid_attr); 862 flow_class = be32_to_cpu(grh->version_tclass_flow); 863 rdma_move_grh_sgid_attr(ah_attr, 864 &sgid, 865 flow_class & 0xFFFFF, 866 hoplimit, 867 (flow_class >> 20) & 0xFF, 868 sgid_attr); 869 870 return 0; 871 } 872 } 873 EXPORT_SYMBOL(ib_init_ah_attr_from_wc); 874 875 /** 876 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership 877 * of the reference 878 * 879 * @attr: Pointer to AH attribute structure 880 * @dgid: Destination GID 881 * @flow_label: Flow label 882 * @hop_limit: Hop limit 883 * @traffic_class: traffic class 884 * @sgid_attr: Pointer to SGID attribute 885 * 886 * This takes ownership of the sgid_attr reference. The caller must ensure 887 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after 888 * calling this function. 889 */ 890 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, 891 u32 flow_label, u8 hop_limit, u8 traffic_class, 892 const struct ib_gid_attr *sgid_attr) 893 { 894 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit, 895 traffic_class); 896 attr->grh.sgid_attr = sgid_attr; 897 } 898 EXPORT_SYMBOL(rdma_move_grh_sgid_attr); 899 900 /** 901 * rdma_destroy_ah_attr - Release reference to SGID attribute of 902 * ah attribute. 903 * @ah_attr: Pointer to ah attribute 904 * 905 * Release reference to the SGID attribute of the ah attribute if it is 906 * non NULL. It is safe to call this multiple times, and safe to call it on 907 * a zero initialized ah_attr. 
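 *
 * A typical pattern, mirroring ib_create_ah_from_wc() below (sketch only;
 * "wc", "grh" and "port_num" are assumed to come from a received completion):
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr))
 *		return NULL;
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	rdma_destroy_ah_attr(&ah_attr);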
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
        if (ah_attr->grh.sgid_attr) {
                rdma_put_gid_attr(ah_attr->grh.sgid_attr);
                ah_attr->grh.sgid_attr = NULL;
        }
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num)
{
        struct rdma_ah_attr ah_attr;
        struct ib_ah *ah;
        int ret;

        ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

        rdma_destroy_ah_attr(&ah_attr);
        return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        const struct ib_gid_attr *old_sgid_attr;
        int ret;

        if (ah->type != ah_attr->type)
                return -EINVAL;

        ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
        if (ret)
                return ret;

        ret = ah->device->ops.modify_ah ?
                ah->device->ops.modify_ah(ah, ah_attr) :
                -EOPNOTSUPP;

        ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
        rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
        return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        ah_attr->grh.sgid_attr = NULL;

        return ah->device->ops.query_ah ?
                ah->device->ops.query_ah(ah, ah_attr) :
                -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
        const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
        struct ib_pd *pd;
        int ret;

        might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

        pd = ah->pd;

        ret = ah->device->ops.destroy_ah(ah, flags);
        if (ret)
                return ret;

        atomic_dec(&pd->usecnt);
        if (sgid_attr)
                rdma_put_gid_attr(sgid_attr);

        kfree(ah);
        return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah_user);

/* Shared receive queues */

/**
 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ. If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 * @uobject: uobject pointer if this is not a kernel SRQ
 * @udata: udata pointer if this is not a kernel SRQ
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
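 *
 * Kernel callers normally use the ib_create_srq() wrapper; a minimal sketch
 * (illustrative only, the queue sizes are arbitrary assumptions):
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr		= { .max_wr = 128, .max_sge = 1 },
 *		.srq_type	= IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);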
1007 */ 1008 struct ib_srq *ib_create_srq_user(struct ib_pd *pd, 1009 struct ib_srq_init_attr *srq_init_attr, 1010 struct ib_usrq_object *uobject, 1011 struct ib_udata *udata) 1012 { 1013 struct ib_srq *srq; 1014 int ret; 1015 1016 srq = rdma_zalloc_drv_obj(pd->device, ib_srq); 1017 if (!srq) 1018 return ERR_PTR(-ENOMEM); 1019 1020 srq->device = pd->device; 1021 srq->pd = pd; 1022 srq->event_handler = srq_init_attr->event_handler; 1023 srq->srq_context = srq_init_attr->srq_context; 1024 srq->srq_type = srq_init_attr->srq_type; 1025 srq->uobject = uobject; 1026 1027 if (ib_srq_has_cq(srq->srq_type)) { 1028 srq->ext.cq = srq_init_attr->ext.cq; 1029 atomic_inc(&srq->ext.cq->usecnt); 1030 } 1031 if (srq->srq_type == IB_SRQT_XRC) { 1032 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; 1033 atomic_inc(&srq->ext.xrc.xrcd->usecnt); 1034 } 1035 atomic_inc(&pd->usecnt); 1036 1037 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); 1038 if (ret) { 1039 atomic_dec(&srq->pd->usecnt); 1040 if (srq->srq_type == IB_SRQT_XRC) 1041 atomic_dec(&srq->ext.xrc.xrcd->usecnt); 1042 if (ib_srq_has_cq(srq->srq_type)) 1043 atomic_dec(&srq->ext.cq->usecnt); 1044 kfree(srq); 1045 return ERR_PTR(ret); 1046 } 1047 1048 return srq; 1049 } 1050 EXPORT_SYMBOL(ib_create_srq_user); 1051 1052 int ib_modify_srq(struct ib_srq *srq, 1053 struct ib_srq_attr *srq_attr, 1054 enum ib_srq_attr_mask srq_attr_mask) 1055 { 1056 return srq->device->ops.modify_srq ? 1057 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, 1058 NULL) : -EOPNOTSUPP; 1059 } 1060 EXPORT_SYMBOL(ib_modify_srq); 1061 1062 int ib_query_srq(struct ib_srq *srq, 1063 struct ib_srq_attr *srq_attr) 1064 { 1065 return srq->device->ops.query_srq ? 1066 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; 1067 } 1068 EXPORT_SYMBOL(ib_query_srq); 1069 1070 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) 1071 { 1072 int ret; 1073 1074 if (atomic_read(&srq->usecnt)) 1075 return -EBUSY; 1076 1077 ret = srq->device->ops.destroy_srq(srq, udata); 1078 if (ret) 1079 return ret; 1080 1081 atomic_dec(&srq->pd->usecnt); 1082 if (srq->srq_type == IB_SRQT_XRC) 1083 atomic_dec(&srq->ext.xrc.xrcd->usecnt); 1084 if (ib_srq_has_cq(srq->srq_type)) 1085 atomic_dec(&srq->ext.cq->usecnt); 1086 kfree(srq); 1087 1088 return ret; 1089 } 1090 EXPORT_SYMBOL(ib_destroy_srq_user); 1091 1092 /* Queue pairs */ 1093 1094 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) 1095 { 1096 struct ib_qp *qp = context; 1097 unsigned long flags; 1098 1099 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); 1100 list_for_each_entry(event->element.qp, &qp->open_list, open_list) 1101 if (event->element.qp->event_handler) 1102 event->element.qp->event_handler(event, event->element.qp->qp_context); 1103 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); 1104 } 1105 1106 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, 1107 void (*event_handler)(struct ib_event *, void *), 1108 void *qp_context) 1109 { 1110 struct ib_qp *qp; 1111 unsigned long flags; 1112 int err; 1113 1114 qp = kzalloc(sizeof *qp, GFP_KERNEL); 1115 if (!qp) 1116 return ERR_PTR(-ENOMEM); 1117 1118 qp->real_qp = real_qp; 1119 err = ib_open_shared_qp_security(qp, real_qp->device); 1120 if (err) { 1121 kfree(qp); 1122 return ERR_PTR(err); 1123 } 1124 1125 qp->real_qp = real_qp; 1126 atomic_inc(&real_qp->usecnt); 1127 qp->device = real_qp->device; 1128 qp->event_handler = event_handler; 1129 qp->qp_context = qp_context; 1130 qp->qp_num = real_qp->qp_num; 1131 
qp->qp_type = real_qp->qp_type; 1132 1133 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); 1134 list_add(&qp->open_list, &real_qp->open_list); 1135 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); 1136 1137 return qp; 1138 } 1139 1140 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 1141 struct ib_qp_open_attr *qp_open_attr) 1142 { 1143 struct ib_qp *qp, *real_qp; 1144 1145 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) 1146 return ERR_PTR(-EINVAL); 1147 1148 down_read(&xrcd->tgt_qps_rwsem); 1149 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); 1150 if (!real_qp) { 1151 up_read(&xrcd->tgt_qps_rwsem); 1152 return ERR_PTR(-EINVAL); 1153 } 1154 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, 1155 qp_open_attr->qp_context); 1156 up_read(&xrcd->tgt_qps_rwsem); 1157 return qp; 1158 } 1159 EXPORT_SYMBOL(ib_open_qp); 1160 1161 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, 1162 struct ib_qp_init_attr *qp_init_attr) 1163 { 1164 struct ib_qp *real_qp = qp; 1165 int err; 1166 1167 qp->event_handler = __ib_shared_qp_event_handler; 1168 qp->qp_context = qp; 1169 qp->pd = NULL; 1170 qp->send_cq = qp->recv_cq = NULL; 1171 qp->srq = NULL; 1172 qp->xrcd = qp_init_attr->xrcd; 1173 atomic_inc(&qp_init_attr->xrcd->usecnt); 1174 INIT_LIST_HEAD(&qp->open_list); 1175 1176 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, 1177 qp_init_attr->qp_context); 1178 if (IS_ERR(qp)) 1179 return qp; 1180 1181 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, 1182 real_qp, GFP_KERNEL)); 1183 if (err) { 1184 ib_close_qp(qp); 1185 return ERR_PTR(err); 1186 } 1187 return qp; 1188 } 1189 1190 /** 1191 * ib_create_qp - Creates a kernel QP associated with the specified protection 1192 * domain. 1193 * @pd: The protection domain associated with the QP. 1194 * @qp_init_attr: A list of initial attributes required to create the 1195 * QP. If QP creation succeeds, then the attributes are updated to 1196 * the actual capabilities of the created QP. 1197 * 1198 * NOTE: for user qp use ib_create_qp_user with valid udata! 1199 */ 1200 struct ib_qp *ib_create_qp(struct ib_pd *pd, 1201 struct ib_qp_init_attr *qp_init_attr) 1202 { 1203 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; 1204 struct ib_qp *qp; 1205 int ret; 1206 1207 if (qp_init_attr->rwq_ind_tbl && 1208 (qp_init_attr->recv_cq || 1209 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || 1210 qp_init_attr->cap.max_recv_sge)) 1211 return ERR_PTR(-EINVAL); 1212 1213 if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) && 1214 !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER)) 1215 return ERR_PTR(-EINVAL); 1216 1217 /* 1218 * If the callers is using the RDMA API calculate the resources 1219 * needed for the RDMA READ/WRITE operations. 1220 * 1221 * Note that these callers need to pass in a port number. 
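 *
 * For example, an RDMA R/W API consumer is expected to fill its init attr
 * roughly as below before calling ib_create_qp() (sketch only; the CQ and
 * queue depths are assumptions):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type	   = IB_QPT_RC,
 *		.send_cq	   = cq,
 *		.recv_cq	   = cq,
 *		.cap.max_send_wr   = 64,
 *		.cap.max_recv_wr   = 64,
 *		.cap.max_send_sge  = 1,
 *		.cap.max_recv_sge  = 1,
 *		.cap.max_rdma_ctxs = 64,
 *		.port_num	   = 1,
 *	};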
1222 */ 1223 if (qp_init_attr->cap.max_rdma_ctxs) 1224 rdma_rw_init_qp(device, qp_init_attr); 1225 1226 qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL); 1227 if (IS_ERR(qp)) 1228 return qp; 1229 1230 ret = ib_create_qp_security(qp, device); 1231 if (ret) 1232 goto err; 1233 1234 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { 1235 struct ib_qp *xrc_qp = 1236 create_xrc_qp_user(qp, qp_init_attr); 1237 1238 if (IS_ERR(xrc_qp)) { 1239 ret = PTR_ERR(xrc_qp); 1240 goto err; 1241 } 1242 return xrc_qp; 1243 } 1244 1245 qp->event_handler = qp_init_attr->event_handler; 1246 qp->qp_context = qp_init_attr->qp_context; 1247 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { 1248 qp->recv_cq = NULL; 1249 qp->srq = NULL; 1250 } else { 1251 qp->recv_cq = qp_init_attr->recv_cq; 1252 if (qp_init_attr->recv_cq) 1253 atomic_inc(&qp_init_attr->recv_cq->usecnt); 1254 qp->srq = qp_init_attr->srq; 1255 if (qp->srq) 1256 atomic_inc(&qp_init_attr->srq->usecnt); 1257 } 1258 1259 qp->send_cq = qp_init_attr->send_cq; 1260 qp->xrcd = NULL; 1261 1262 atomic_inc(&pd->usecnt); 1263 if (qp_init_attr->send_cq) 1264 atomic_inc(&qp_init_attr->send_cq->usecnt); 1265 if (qp_init_attr->rwq_ind_tbl) 1266 atomic_inc(&qp->rwq_ind_tbl->usecnt); 1267 1268 if (qp_init_attr->cap.max_rdma_ctxs) { 1269 ret = rdma_rw_init_mrs(qp, qp_init_attr); 1270 if (ret) 1271 goto err; 1272 } 1273 1274 /* 1275 * Note: all hw drivers guarantee that max_send_sge is lower than 1276 * the device RDMA WRITE SGE limit but not all hw drivers ensure that 1277 * max_send_sge <= max_sge_rd. 1278 */ 1279 qp->max_write_sge = qp_init_attr->cap.max_send_sge; 1280 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, 1281 device->attrs.max_sge_rd); 1282 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) 1283 qp->integrity_en = true; 1284 1285 return qp; 1286 1287 err: 1288 ib_destroy_qp(qp); 1289 return ERR_PTR(ret); 1290 1291 } 1292 EXPORT_SYMBOL(ib_create_qp); 1293 1294 static const struct { 1295 int valid; 1296 enum ib_qp_attr_mask req_param[IB_QPT_MAX]; 1297 enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; 1298 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 1299 [IB_QPS_RESET] = { 1300 [IB_QPS_RESET] = { .valid = 1 }, 1301 [IB_QPS_INIT] = { 1302 .valid = 1, 1303 .req_param = { 1304 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1305 IB_QP_PORT | 1306 IB_QP_QKEY), 1307 [IB_QPT_RAW_PACKET] = IB_QP_PORT, 1308 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 1309 IB_QP_PORT | 1310 IB_QP_ACCESS_FLAGS), 1311 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 1312 IB_QP_PORT | 1313 IB_QP_ACCESS_FLAGS), 1314 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 1315 IB_QP_PORT | 1316 IB_QP_ACCESS_FLAGS), 1317 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 1318 IB_QP_PORT | 1319 IB_QP_ACCESS_FLAGS), 1320 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1321 IB_QP_QKEY), 1322 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1323 IB_QP_QKEY), 1324 } 1325 }, 1326 }, 1327 [IB_QPS_INIT] = { 1328 [IB_QPS_RESET] = { .valid = 1 }, 1329 [IB_QPS_ERR] = { .valid = 1 }, 1330 [IB_QPS_INIT] = { 1331 .valid = 1, 1332 .opt_param = { 1333 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1334 IB_QP_PORT | 1335 IB_QP_QKEY), 1336 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 1337 IB_QP_PORT | 1338 IB_QP_ACCESS_FLAGS), 1339 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 1340 IB_QP_PORT | 1341 IB_QP_ACCESS_FLAGS), 1342 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 1343 IB_QP_PORT | 1344 IB_QP_ACCESS_FLAGS), 1345 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 1346 IB_QP_PORT | 1347 IB_QP_ACCESS_FLAGS), 1348 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1349 IB_QP_QKEY), 1350 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1351 IB_QP_QKEY), 
1352 } 1353 }, 1354 [IB_QPS_RTR] = { 1355 .valid = 1, 1356 .req_param = { 1357 [IB_QPT_UC] = (IB_QP_AV | 1358 IB_QP_PATH_MTU | 1359 IB_QP_DEST_QPN | 1360 IB_QP_RQ_PSN), 1361 [IB_QPT_RC] = (IB_QP_AV | 1362 IB_QP_PATH_MTU | 1363 IB_QP_DEST_QPN | 1364 IB_QP_RQ_PSN | 1365 IB_QP_MAX_DEST_RD_ATOMIC | 1366 IB_QP_MIN_RNR_TIMER), 1367 [IB_QPT_XRC_INI] = (IB_QP_AV | 1368 IB_QP_PATH_MTU | 1369 IB_QP_DEST_QPN | 1370 IB_QP_RQ_PSN), 1371 [IB_QPT_XRC_TGT] = (IB_QP_AV | 1372 IB_QP_PATH_MTU | 1373 IB_QP_DEST_QPN | 1374 IB_QP_RQ_PSN | 1375 IB_QP_MAX_DEST_RD_ATOMIC | 1376 IB_QP_MIN_RNR_TIMER), 1377 }, 1378 .opt_param = { 1379 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1380 IB_QP_QKEY), 1381 [IB_QPT_UC] = (IB_QP_ALT_PATH | 1382 IB_QP_ACCESS_FLAGS | 1383 IB_QP_PKEY_INDEX), 1384 [IB_QPT_RC] = (IB_QP_ALT_PATH | 1385 IB_QP_ACCESS_FLAGS | 1386 IB_QP_PKEY_INDEX), 1387 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | 1388 IB_QP_ACCESS_FLAGS | 1389 IB_QP_PKEY_INDEX), 1390 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | 1391 IB_QP_ACCESS_FLAGS | 1392 IB_QP_PKEY_INDEX), 1393 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1394 IB_QP_QKEY), 1395 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1396 IB_QP_QKEY), 1397 }, 1398 }, 1399 }, 1400 [IB_QPS_RTR] = { 1401 [IB_QPS_RESET] = { .valid = 1 }, 1402 [IB_QPS_ERR] = { .valid = 1 }, 1403 [IB_QPS_RTS] = { 1404 .valid = 1, 1405 .req_param = { 1406 [IB_QPT_UD] = IB_QP_SQ_PSN, 1407 [IB_QPT_UC] = IB_QP_SQ_PSN, 1408 [IB_QPT_RC] = (IB_QP_TIMEOUT | 1409 IB_QP_RETRY_CNT | 1410 IB_QP_RNR_RETRY | 1411 IB_QP_SQ_PSN | 1412 IB_QP_MAX_QP_RD_ATOMIC), 1413 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | 1414 IB_QP_RETRY_CNT | 1415 IB_QP_RNR_RETRY | 1416 IB_QP_SQ_PSN | 1417 IB_QP_MAX_QP_RD_ATOMIC), 1418 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | 1419 IB_QP_SQ_PSN), 1420 [IB_QPT_SMI] = IB_QP_SQ_PSN, 1421 [IB_QPT_GSI] = IB_QP_SQ_PSN, 1422 }, 1423 .opt_param = { 1424 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1425 IB_QP_QKEY), 1426 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1427 IB_QP_ALT_PATH | 1428 IB_QP_ACCESS_FLAGS | 1429 IB_QP_PATH_MIG_STATE), 1430 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1431 IB_QP_ALT_PATH | 1432 IB_QP_ACCESS_FLAGS | 1433 IB_QP_MIN_RNR_TIMER | 1434 IB_QP_PATH_MIG_STATE), 1435 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1436 IB_QP_ALT_PATH | 1437 IB_QP_ACCESS_FLAGS | 1438 IB_QP_PATH_MIG_STATE), 1439 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1440 IB_QP_ALT_PATH | 1441 IB_QP_ACCESS_FLAGS | 1442 IB_QP_MIN_RNR_TIMER | 1443 IB_QP_PATH_MIG_STATE), 1444 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1445 IB_QP_QKEY), 1446 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1447 IB_QP_QKEY), 1448 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, 1449 } 1450 } 1451 }, 1452 [IB_QPS_RTS] = { 1453 [IB_QPS_RESET] = { .valid = 1 }, 1454 [IB_QPS_ERR] = { .valid = 1 }, 1455 [IB_QPS_RTS] = { 1456 .valid = 1, 1457 .opt_param = { 1458 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1459 IB_QP_QKEY), 1460 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1461 IB_QP_ACCESS_FLAGS | 1462 IB_QP_ALT_PATH | 1463 IB_QP_PATH_MIG_STATE), 1464 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1465 IB_QP_ACCESS_FLAGS | 1466 IB_QP_ALT_PATH | 1467 IB_QP_PATH_MIG_STATE | 1468 IB_QP_MIN_RNR_TIMER), 1469 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1470 IB_QP_ACCESS_FLAGS | 1471 IB_QP_ALT_PATH | 1472 IB_QP_PATH_MIG_STATE), 1473 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1474 IB_QP_ACCESS_FLAGS | 1475 IB_QP_ALT_PATH | 1476 IB_QP_PATH_MIG_STATE | 1477 IB_QP_MIN_RNR_TIMER), 1478 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1479 IB_QP_QKEY), 1480 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1481 IB_QP_QKEY), 1482 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, 1483 } 1484 }, 1485 [IB_QPS_SQD] = { 1486 .valid = 1, 1487 .opt_param = { 1488 [IB_QPT_UD] = 
IB_QP_EN_SQD_ASYNC_NOTIFY, 1489 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1490 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1491 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1492 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ 1493 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1494 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY 1495 } 1496 }, 1497 }, 1498 [IB_QPS_SQD] = { 1499 [IB_QPS_RESET] = { .valid = 1 }, 1500 [IB_QPS_ERR] = { .valid = 1 }, 1501 [IB_QPS_RTS] = { 1502 .valid = 1, 1503 .opt_param = { 1504 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1505 IB_QP_QKEY), 1506 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1507 IB_QP_ALT_PATH | 1508 IB_QP_ACCESS_FLAGS | 1509 IB_QP_PATH_MIG_STATE), 1510 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1511 IB_QP_ALT_PATH | 1512 IB_QP_ACCESS_FLAGS | 1513 IB_QP_MIN_RNR_TIMER | 1514 IB_QP_PATH_MIG_STATE), 1515 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1516 IB_QP_ALT_PATH | 1517 IB_QP_ACCESS_FLAGS | 1518 IB_QP_PATH_MIG_STATE), 1519 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1520 IB_QP_ALT_PATH | 1521 IB_QP_ACCESS_FLAGS | 1522 IB_QP_MIN_RNR_TIMER | 1523 IB_QP_PATH_MIG_STATE), 1524 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1525 IB_QP_QKEY), 1526 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1527 IB_QP_QKEY), 1528 } 1529 }, 1530 [IB_QPS_SQD] = { 1531 .valid = 1, 1532 .opt_param = { 1533 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1534 IB_QP_QKEY), 1535 [IB_QPT_UC] = (IB_QP_AV | 1536 IB_QP_ALT_PATH | 1537 IB_QP_ACCESS_FLAGS | 1538 IB_QP_PKEY_INDEX | 1539 IB_QP_PATH_MIG_STATE), 1540 [IB_QPT_RC] = (IB_QP_PORT | 1541 IB_QP_AV | 1542 IB_QP_TIMEOUT | 1543 IB_QP_RETRY_CNT | 1544 IB_QP_RNR_RETRY | 1545 IB_QP_MAX_QP_RD_ATOMIC | 1546 IB_QP_MAX_DEST_RD_ATOMIC | 1547 IB_QP_ALT_PATH | 1548 IB_QP_ACCESS_FLAGS | 1549 IB_QP_PKEY_INDEX | 1550 IB_QP_MIN_RNR_TIMER | 1551 IB_QP_PATH_MIG_STATE), 1552 [IB_QPT_XRC_INI] = (IB_QP_PORT | 1553 IB_QP_AV | 1554 IB_QP_TIMEOUT | 1555 IB_QP_RETRY_CNT | 1556 IB_QP_RNR_RETRY | 1557 IB_QP_MAX_QP_RD_ATOMIC | 1558 IB_QP_ALT_PATH | 1559 IB_QP_ACCESS_FLAGS | 1560 IB_QP_PKEY_INDEX | 1561 IB_QP_PATH_MIG_STATE), 1562 [IB_QPT_XRC_TGT] = (IB_QP_PORT | 1563 IB_QP_AV | 1564 IB_QP_TIMEOUT | 1565 IB_QP_MAX_DEST_RD_ATOMIC | 1566 IB_QP_ALT_PATH | 1567 IB_QP_ACCESS_FLAGS | 1568 IB_QP_PKEY_INDEX | 1569 IB_QP_MIN_RNR_TIMER | 1570 IB_QP_PATH_MIG_STATE), 1571 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1572 IB_QP_QKEY), 1573 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1574 IB_QP_QKEY), 1575 } 1576 } 1577 }, 1578 [IB_QPS_SQE] = { 1579 [IB_QPS_RESET] = { .valid = 1 }, 1580 [IB_QPS_ERR] = { .valid = 1 }, 1581 [IB_QPS_RTS] = { 1582 .valid = 1, 1583 .opt_param = { 1584 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1585 IB_QP_QKEY), 1586 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1587 IB_QP_ACCESS_FLAGS), 1588 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1589 IB_QP_QKEY), 1590 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1591 IB_QP_QKEY), 1592 } 1593 } 1594 }, 1595 [IB_QPS_ERR] = { 1596 [IB_QPS_RESET] = { .valid = 1 }, 1597 [IB_QPS_ERR] = { .valid = 1 } 1598 } 1599 }; 1600 1601 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1602 enum ib_qp_type type, enum ib_qp_attr_mask mask) 1603 { 1604 enum ib_qp_attr_mask req_param, opt_param; 1605 1606 if (mask & IB_QP_CUR_STATE && 1607 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && 1608 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) 1609 return false; 1610 1611 if (!qp_state_table[cur_state][next_state].valid) 1612 return false; 1613 1614 req_param = qp_state_table[cur_state][next_state].req_param[type]; 1615 opt_param = qp_state_table[cur_state][next_state].opt_param[type]; 1616 1617 if ((mask & req_param) != req_param) 
                return false;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return false;

        return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device: Device to consider
 * @ah_attr: address handle attribute which describes the
 *           source and destination parameters
 * ib_resolve_eth_dmac() resolves destination mac address and L3 hop limit.
 * It returns 0 on success or an appropriate error code. It initializes the
 * necessary ah_attr fields when the call is successful.
 */
static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr)
{
        int ret = 0;

        if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                        __be32 addr = 0;

                        memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
                        ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
                } else {
                        ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
                                        (char *)ah_attr->roce.dmac);
                }
        } else {
                ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
        }
        return ret;
}

static bool is_qp_type_connected(const struct ib_qp *qp)
{
        return (qp->qp_type == IB_QPT_UC ||
                qp->qp_type == IB_QPT_RC ||
                qp->qp_type == IB_QPT_XRC_INI ||
                qp->qp_type == IB_QPT_XRC_TGT);
}

/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
        const struct ib_gid_attr *old_sgid_attr_av;
        const struct ib_gid_attr *old_sgid_attr_alt_av;
        int ret;

        attr->xmit_slave = NULL;
        if (attr_mask & IB_QP_AV) {
                ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
                                          &old_sgid_attr_av);
                if (ret)
                        return ret;

                if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
                    is_qp_type_connected(qp)) {
                        struct net_device *slave;

                        /*
                         * If the user provided the qp_attr then we have to
                         * resolve it. Kernel users have to provide already
                         * resolved rdma_ah_attr's.
                         */
                        if (udata) {
                                ret = ib_resolve_eth_dmac(qp->device,
                                                          &attr->ah_attr);
                                if (ret)
                                        goto out_av;
                        }
                        slave = rdma_lag_get_ah_roce_slave(qp->device,
                                                           &attr->ah_attr,
                                                           GFP_KERNEL);
                        if (IS_ERR(slave)) {
                                ret = PTR_ERR(slave);
                                goto out_av;
                        }
                        attr->xmit_slave = slave;
                }
        }
        if (attr_mask & IB_QP_ALT_PATH) {
                /*
                 * FIXME: This does not track the migration state, so if the
                 * user loads a new alternate path after the HW has migrated
                 * from primary->alternate we will keep the wrong
                 * references. This is OK for IB because the reference
                 * counting does not serve any functional purpose.
                 */
                ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
                                          &old_sgid_attr_alt_av);
                if (ret)
                        goto out_av;

                /*
                 * Today the core code can only handle alternate paths and APM
                 * for IB. Ban them in roce mode.
                 */
                if (!(rdma_protocol_ib(qp->device,
                                       attr->alt_ah_attr.port_num) &&
                      rdma_protocol_ib(qp->device, port))) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (rdma_ib_or_roce(qp->device, port)) {
                if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
                        dev_warn(&qp->device->dev,
                                 "%s rq_psn overflow, masking to 24 bits\n",
                                 __func__);
                        attr->rq_psn &= 0xffffff;
                }

                if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
                        dev_warn(&qp->device->dev,
                                 "%s sq_psn overflow, masking to 24 bits\n",
                                 __func__);
                        attr->sq_psn &= 0xffffff;
                }
        }

        /*
         * Bind this qp to a counter automatically based on the rdma counter
         * rules. This is only set in RST2INIT when a port is specified.
         */
        if (!qp->counter && (attr_mask & IB_QP_PORT) &&
            ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
                rdma_counter_bind_qp_auto(qp, attr->port_num);

        ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
        if (ret)
                goto out;

        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_AV)
                qp->av_sgid_attr =
                        rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_path_sgid_attr = rdma_update_sgid_attr(
                        &attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
        if (attr_mask & IB_QP_ALT_PATH)
                rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
        if (attr_mask & IB_QP_AV) {
                rdma_lag_put_ah_roce_slave(attr->xmit_slave);
                rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
        }
        return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information needed by
 *   the provider driver.
 * It returns 0 on success and an appropriate error code on error.
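 *
 * A typical kernel-side RESET->INIT transition through ib_modify_qp() looks
 * like this (sketch only; the pkey index, port and access flags are
 * assumptions):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);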
1789 */ 1790 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, 1791 int attr_mask, struct ib_udata *udata) 1792 { 1793 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata); 1794 } 1795 EXPORT_SYMBOL(ib_modify_qp_with_udata); 1796 1797 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width) 1798 { 1799 int rc; 1800 u32 netdev_speed; 1801 struct net_device *netdev; 1802 struct ethtool_link_ksettings lksettings; 1803 1804 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) 1805 return -EINVAL; 1806 1807 netdev = ib_device_get_netdev(dev, port_num); 1808 if (!netdev) 1809 return -ENODEV; 1810 1811 rtnl_lock(); 1812 rc = __ethtool_get_link_ksettings(netdev, &lksettings); 1813 rtnl_unlock(); 1814 1815 dev_put(netdev); 1816 1817 if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { 1818 netdev_speed = lksettings.base.speed; 1819 } else { 1820 netdev_speed = SPEED_1000; 1821 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name, 1822 netdev_speed); 1823 } 1824 1825 if (netdev_speed <= SPEED_1000) { 1826 *width = IB_WIDTH_1X; 1827 *speed = IB_SPEED_SDR; 1828 } else if (netdev_speed <= SPEED_10000) { 1829 *width = IB_WIDTH_1X; 1830 *speed = IB_SPEED_FDR10; 1831 } else if (netdev_speed <= SPEED_20000) { 1832 *width = IB_WIDTH_4X; 1833 *speed = IB_SPEED_DDR; 1834 } else if (netdev_speed <= SPEED_25000) { 1835 *width = IB_WIDTH_1X; 1836 *speed = IB_SPEED_EDR; 1837 } else if (netdev_speed <= SPEED_40000) { 1838 *width = IB_WIDTH_4X; 1839 *speed = IB_SPEED_FDR10; 1840 } else { 1841 *width = IB_WIDTH_4X; 1842 *speed = IB_SPEED_EDR; 1843 } 1844 1845 return 0; 1846 } 1847 EXPORT_SYMBOL(ib_get_eth_speed); 1848 1849 int ib_modify_qp(struct ib_qp *qp, 1850 struct ib_qp_attr *qp_attr, 1851 int qp_attr_mask) 1852 { 1853 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); 1854 } 1855 EXPORT_SYMBOL(ib_modify_qp); 1856 1857 int ib_query_qp(struct ib_qp *qp, 1858 struct ib_qp_attr *qp_attr, 1859 int qp_attr_mask, 1860 struct ib_qp_init_attr *qp_init_attr) 1861 { 1862 qp_attr->ah_attr.grh.sgid_attr = NULL; 1863 qp_attr->alt_ah_attr.grh.sgid_attr = NULL; 1864 1865 return qp->device->ops.query_qp ? 
1866 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, 1867 qp_init_attr) : -EOPNOTSUPP; 1868 } 1869 EXPORT_SYMBOL(ib_query_qp); 1870 1871 int ib_close_qp(struct ib_qp *qp) 1872 { 1873 struct ib_qp *real_qp; 1874 unsigned long flags; 1875 1876 real_qp = qp->real_qp; 1877 if (real_qp == qp) 1878 return -EINVAL; 1879 1880 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); 1881 list_del(&qp->open_list); 1882 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); 1883 1884 atomic_dec(&real_qp->usecnt); 1885 if (qp->qp_sec) 1886 ib_close_shared_qp_security(qp->qp_sec); 1887 kfree(qp); 1888 1889 return 0; 1890 } 1891 EXPORT_SYMBOL(ib_close_qp); 1892 1893 static int __ib_destroy_shared_qp(struct ib_qp *qp) 1894 { 1895 struct ib_xrcd *xrcd; 1896 struct ib_qp *real_qp; 1897 int ret; 1898 1899 real_qp = qp->real_qp; 1900 xrcd = real_qp->xrcd; 1901 down_write(&xrcd->tgt_qps_rwsem); 1902 ib_close_qp(qp); 1903 if (atomic_read(&real_qp->usecnt) == 0) 1904 xa_erase(&xrcd->tgt_qps, real_qp->qp_num); 1905 else 1906 real_qp = NULL; 1907 up_write(&xrcd->tgt_qps_rwsem); 1908 1909 if (real_qp) { 1910 ret = ib_destroy_qp(real_qp); 1911 if (!ret) 1912 atomic_dec(&xrcd->usecnt); 1913 } 1914 1915 return 0; 1916 } 1917 1918 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) 1919 { 1920 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; 1921 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; 1922 struct ib_pd *pd; 1923 struct ib_cq *scq, *rcq; 1924 struct ib_srq *srq; 1925 struct ib_rwq_ind_table *ind_tbl; 1926 struct ib_qp_security *sec; 1927 int ret; 1928 1929 WARN_ON_ONCE(qp->mrs_used > 0); 1930 1931 if (atomic_read(&qp->usecnt)) 1932 return -EBUSY; 1933 1934 if (qp->real_qp != qp) 1935 return __ib_destroy_shared_qp(qp); 1936 1937 pd = qp->pd; 1938 scq = qp->send_cq; 1939 rcq = qp->recv_cq; 1940 srq = qp->srq; 1941 ind_tbl = qp->rwq_ind_tbl; 1942 sec = qp->qp_sec; 1943 if (sec) 1944 ib_destroy_qp_security_begin(sec); 1945 1946 if (!qp->uobject) 1947 rdma_rw_cleanup_mrs(qp); 1948 1949 rdma_counter_unbind_qp(qp, true); 1950 rdma_restrack_del(&qp->res); 1951 ret = qp->device->ops.destroy_qp(qp, udata); 1952 if (!ret) { 1953 if (alt_path_sgid_attr) 1954 rdma_put_gid_attr(alt_path_sgid_attr); 1955 if (av_sgid_attr) 1956 rdma_put_gid_attr(av_sgid_attr); 1957 if (pd) 1958 atomic_dec(&pd->usecnt); 1959 if (scq) 1960 atomic_dec(&scq->usecnt); 1961 if (rcq) 1962 atomic_dec(&rcq->usecnt); 1963 if (srq) 1964 atomic_dec(&srq->usecnt); 1965 if (ind_tbl) 1966 atomic_dec(&ind_tbl->usecnt); 1967 if (sec) 1968 ib_destroy_qp_security_end(sec); 1969 } else { 1970 if (sec) 1971 ib_destroy_qp_security_abort(sec); 1972 } 1973 1974 return ret; 1975 } 1976 EXPORT_SYMBOL(ib_destroy_qp_user); 1977 1978 /* Completion queues */ 1979 1980 struct ib_cq *__ib_create_cq(struct ib_device *device, 1981 ib_comp_handler comp_handler, 1982 void (*event_handler)(struct ib_event *, void *), 1983 void *cq_context, 1984 const struct ib_cq_init_attr *cq_attr, 1985 const char *caller) 1986 { 1987 struct ib_cq *cq; 1988 int ret; 1989 1990 cq = rdma_zalloc_drv_obj(device, ib_cq); 1991 if (!cq) 1992 return ERR_PTR(-ENOMEM); 1993 1994 cq->device = device; 1995 cq->uobject = NULL; 1996 cq->comp_handler = comp_handler; 1997 cq->event_handler = event_handler; 1998 cq->cq_context = cq_context; 1999 atomic_set(&cq->usecnt, 0); 2000 2001 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); 2002 rdma_restrack_set_name(&cq->res, caller); 2003 2004 ret = device->ops.create_cq(cq, cq_attr, NULL); 
2005 if (ret) { 2006 rdma_restrack_put(&cq->res); 2007 kfree(cq); 2008 return ERR_PTR(ret); 2009 } 2010 2011 rdma_restrack_add(&cq->res); 2012 return cq; 2013 } 2014 EXPORT_SYMBOL(__ib_create_cq); 2015 2016 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) 2017 { 2018 if (cq->shared) 2019 return -EOPNOTSUPP; 2020 2021 return cq->device->ops.modify_cq ? 2022 cq->device->ops.modify_cq(cq, cq_count, 2023 cq_period) : -EOPNOTSUPP; 2024 } 2025 EXPORT_SYMBOL(rdma_set_cq_moderation); 2026 2027 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) 2028 { 2029 int ret; 2030 2031 if (WARN_ON_ONCE(cq->shared)) 2032 return -EOPNOTSUPP; 2033 2034 if (atomic_read(&cq->usecnt)) 2035 return -EBUSY; 2036 2037 ret = cq->device->ops.destroy_cq(cq, udata); 2038 if (ret) 2039 return ret; 2040 2041 rdma_restrack_del(&cq->res); 2042 kfree(cq); 2043 return ret; 2044 } 2045 EXPORT_SYMBOL(ib_destroy_cq_user); 2046 2047 int ib_resize_cq(struct ib_cq *cq, int cqe) 2048 { 2049 if (cq->shared) 2050 return -EOPNOTSUPP; 2051 2052 return cq->device->ops.resize_cq ? 2053 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; 2054 } 2055 EXPORT_SYMBOL(ib_resize_cq); 2056 2057 /* Memory regions */ 2058 2059 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 2060 u64 virt_addr, int access_flags) 2061 { 2062 struct ib_mr *mr; 2063 2064 if (access_flags & IB_ACCESS_ON_DEMAND) { 2065 if (!(pd->device->attrs.device_cap_flags & 2066 IB_DEVICE_ON_DEMAND_PAGING)) { 2067 pr_debug("ODP support not available\n"); 2068 return ERR_PTR(-EINVAL); 2069 } 2070 } 2071 2072 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, 2073 access_flags, NULL); 2074 2075 if (IS_ERR(mr)) 2076 return mr; 2077 2078 mr->device = pd->device; 2079 mr->pd = pd; 2080 mr->dm = NULL; 2081 atomic_inc(&pd->usecnt); 2082 2083 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2084 rdma_restrack_parent_name(&mr->res, &pd->res); 2085 rdma_restrack_add(&mr->res); 2086 2087 return mr; 2088 } 2089 EXPORT_SYMBOL(ib_reg_user_mr); 2090 2091 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, 2092 u32 flags, struct ib_sge *sg_list, u32 num_sge) 2093 { 2094 if (!pd->device->ops.advise_mr) 2095 return -EOPNOTSUPP; 2096 2097 if (!num_sge) 2098 return 0; 2099 2100 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, 2101 NULL); 2102 } 2103 EXPORT_SYMBOL(ib_advise_mr); 2104 2105 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) 2106 { 2107 struct ib_pd *pd = mr->pd; 2108 struct ib_dm *dm = mr->dm; 2109 struct ib_sig_attrs *sig_attrs = mr->sig_attrs; 2110 int ret; 2111 2112 trace_mr_dereg(mr); 2113 rdma_restrack_del(&mr->res); 2114 ret = mr->device->ops.dereg_mr(mr, udata); 2115 if (!ret) { 2116 atomic_dec(&pd->usecnt); 2117 if (dm) 2118 atomic_dec(&dm->usecnt); 2119 kfree(sig_attrs); 2120 } 2121 2122 return ret; 2123 } 2124 EXPORT_SYMBOL(ib_dereg_mr_user); 2125 2126 /** 2127 * ib_alloc_mr() - Allocates a memory region 2128 * @pd: protection domain associated with the region 2129 * @mr_type: memory region type 2130 * @max_num_sg: maximum sg entries available for registration. 2131 * 2132 * Notes: 2133 * Memory registration page/sg lists must not exceed max_num_sg. 2134 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed 2135 * max_num_sg * used_page_size. 
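 *
 * Illustrative usage sketch (an assumption, not taken from this file;
 * "pd", "sg" and "sg_nents" are hypothetical caller-owned, DMA-mapped
 * objects):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);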
2136 * 2137 */ 2138 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 2139 u32 max_num_sg) 2140 { 2141 struct ib_mr *mr; 2142 2143 if (!pd->device->ops.alloc_mr) { 2144 mr = ERR_PTR(-EOPNOTSUPP); 2145 goto out; 2146 } 2147 2148 if (mr_type == IB_MR_TYPE_INTEGRITY) { 2149 WARN_ON_ONCE(1); 2150 mr = ERR_PTR(-EINVAL); 2151 goto out; 2152 } 2153 2154 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); 2155 if (IS_ERR(mr)) 2156 goto out; 2157 2158 mr->device = pd->device; 2159 mr->pd = pd; 2160 mr->dm = NULL; 2161 mr->uobject = NULL; 2162 atomic_inc(&pd->usecnt); 2163 mr->need_inval = false; 2164 mr->type = mr_type; 2165 mr->sig_attrs = NULL; 2166 2167 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2168 rdma_restrack_parent_name(&mr->res, &pd->res); 2169 rdma_restrack_add(&mr->res); 2170 out: 2171 trace_mr_alloc(pd, mr_type, max_num_sg, mr); 2172 return mr; 2173 } 2174 EXPORT_SYMBOL(ib_alloc_mr); 2175 2176 /** 2177 * ib_alloc_mr_integrity() - Allocates an integrity memory region 2178 * @pd: protection domain associated with the region 2179 * @max_num_data_sg: maximum data sg entries available for registration 2180 * @max_num_meta_sg: maximum metadata sg entries available for 2181 * registration 2182 * 2183 * Notes: 2184 * Memory registration page/sg lists must not exceed max_num_data_sg, 2185 * and the integrity page/sg lists must not exceed max_num_meta_sg. 2186 * 2187 */ 2188 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, 2189 u32 max_num_data_sg, 2190 u32 max_num_meta_sg) 2191 { 2192 struct ib_mr *mr; 2193 struct ib_sig_attrs *sig_attrs; 2194 2195 if (!pd->device->ops.alloc_mr_integrity || 2196 !pd->device->ops.map_mr_sg_pi) { 2197 mr = ERR_PTR(-EOPNOTSUPP); 2198 goto out; 2199 } 2200 2201 if (!max_num_meta_sg) { 2202 mr = ERR_PTR(-EINVAL); 2203 goto out; 2204 } 2205 2206 sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL); 2207 if (!sig_attrs) { 2208 mr = ERR_PTR(-ENOMEM); 2209 goto out; 2210 } 2211 2212 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, 2213 max_num_meta_sg); 2214 if (IS_ERR(mr)) { 2215 kfree(sig_attrs); 2216 goto out; 2217 } 2218 2219 mr->device = pd->device; 2220 mr->pd = pd; 2221 mr->dm = NULL; 2222 mr->uobject = NULL; 2223 atomic_inc(&pd->usecnt); 2224 mr->need_inval = false; 2225 mr->type = IB_MR_TYPE_INTEGRITY; 2226 mr->sig_attrs = sig_attrs; 2227 2228 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2229 rdma_restrack_parent_name(&mr->res, &pd->res); 2230 rdma_restrack_add(&mr->res); 2231 out: 2232 trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr); 2233 return mr; 2234 } 2235 EXPORT_SYMBOL(ib_alloc_mr_integrity); 2236 2237 /* Multicast groups */ 2238 2239 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) 2240 { 2241 struct ib_qp_init_attr init_attr = {}; 2242 struct ib_qp_attr attr = {}; 2243 int num_eth_ports = 0; 2244 int port; 2245 2246 /* If QP state >= init, it is assigned to a port and we can check this 2247 * port only. 
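	 * Otherwise fall back to scanning every port of the device below.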
2248 */ 2249 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { 2250 if (attr.qp_state >= IB_QPS_INIT) { 2251 if (rdma_port_get_link_layer(qp->device, attr.port_num) != 2252 IB_LINK_LAYER_INFINIBAND) 2253 return true; 2254 goto lid_check; 2255 } 2256 } 2257 2258 /* Can't get a quick answer, iterate over all ports */ 2259 for (port = 0; port < qp->device->phys_port_cnt; port++) 2260 if (rdma_port_get_link_layer(qp->device, port) != 2261 IB_LINK_LAYER_INFINIBAND) 2262 num_eth_ports++; 2263 2264 /* If we have at least one Ethernet port, RoCE annex declares that 2265 * multicast LID should be ignored. We can't tell at this step if the 2266 * QP belongs to an IB or Ethernet port. 2267 */ 2268 if (num_eth_ports) 2269 return true; 2270 2271 /* If all the ports are IB, we can check according to IB spec. */ 2272 lid_check: 2273 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || 2274 lid == be16_to_cpu(IB_LID_PERMISSIVE)); 2275 } 2276 2277 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 2278 { 2279 int ret; 2280 2281 if (!qp->device->ops.attach_mcast) 2282 return -EOPNOTSUPP; 2283 2284 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 2285 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 2286 return -EINVAL; 2287 2288 ret = qp->device->ops.attach_mcast(qp, gid, lid); 2289 if (!ret) 2290 atomic_inc(&qp->usecnt); 2291 return ret; 2292 } 2293 EXPORT_SYMBOL(ib_attach_mcast); 2294 2295 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 2296 { 2297 int ret; 2298 2299 if (!qp->device->ops.detach_mcast) 2300 return -EOPNOTSUPP; 2301 2302 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 2303 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 2304 return -EINVAL; 2305 2306 ret = qp->device->ops.detach_mcast(qp, gid, lid); 2307 if (!ret) 2308 atomic_dec(&qp->usecnt); 2309 return ret; 2310 } 2311 EXPORT_SYMBOL(ib_detach_mcast); 2312 2313 /** 2314 * ib_alloc_xrcd_user - Allocates an XRC domain. 2315 * @device: The device on which to allocate the XRC domain. 2316 * @inode: inode to connect XRCD 2317 * @udata: Valid user data or NULL for kernel object 2318 */ 2319 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, 2320 struct inode *inode, struct ib_udata *udata) 2321 { 2322 struct ib_xrcd *xrcd; 2323 int ret; 2324 2325 if (!device->ops.alloc_xrcd) 2326 return ERR_PTR(-EOPNOTSUPP); 2327 2328 xrcd = rdma_zalloc_drv_obj(device, ib_xrcd); 2329 if (!xrcd) 2330 return ERR_PTR(-ENOMEM); 2331 2332 xrcd->device = device; 2333 xrcd->inode = inode; 2334 atomic_set(&xrcd->usecnt, 0); 2335 init_rwsem(&xrcd->tgt_qps_rwsem); 2336 xa_init(&xrcd->tgt_qps); 2337 2338 ret = device->ops.alloc_xrcd(xrcd, udata); 2339 if (ret) 2340 goto err; 2341 return xrcd; 2342 err: 2343 kfree(xrcd); 2344 return ERR_PTR(ret); 2345 } 2346 EXPORT_SYMBOL(ib_alloc_xrcd_user); 2347 2348 /** 2349 * ib_dealloc_xrcd_user - Deallocates an XRC domain. 2350 * @xrcd: The XRC domain to deallocate. 2351 * @udata: Valid user data or NULL for kernel object 2352 */ 2353 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) 2354 { 2355 int ret; 2356 2357 if (atomic_read(&xrcd->usecnt)) 2358 return -EBUSY; 2359 2360 WARN_ON(!xa_empty(&xrcd->tgt_qps)); 2361 ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); 2362 if (ret) 2363 return ret; 2364 kfree(xrcd); 2365 return ret; 2366 } 2367 EXPORT_SYMBOL(ib_dealloc_xrcd_user); 2368 2369 /** 2370 * ib_create_wq - Creates a WQ associated with the specified protection 2371 * domain. 
2372 * @pd: The protection domain associated with the WQ. 2373 * @wq_attr: A list of initial attributes required to create the 2374 * WQ. If WQ creation succeeds, then the attributes are updated to 2375 * the actual capabilities of the created WQ. 2376 * 2377 * wq_attr->max_wr and wq_attr->max_sge determine 2378 * the requested size of the WQ, and are set to the actual values allocated 2379 * on return. 2380 * If ib_create_wq() succeeds, then max_wr and max_sge will always be 2381 * at least as large as the requested values. 2382 */ 2383 struct ib_wq *ib_create_wq(struct ib_pd *pd, 2384 struct ib_wq_init_attr *wq_attr) 2385 { 2386 struct ib_wq *wq; 2387 2388 if (!pd->device->ops.create_wq) 2389 return ERR_PTR(-EOPNOTSUPP); 2390 2391 wq = pd->device->ops.create_wq(pd, wq_attr, NULL); 2392 if (!IS_ERR(wq)) { 2393 wq->event_handler = wq_attr->event_handler; 2394 wq->wq_context = wq_attr->wq_context; 2395 wq->wq_type = wq_attr->wq_type; 2396 wq->cq = wq_attr->cq; 2397 wq->device = pd->device; 2398 wq->pd = pd; 2399 wq->uobject = NULL; 2400 atomic_inc(&pd->usecnt); 2401 atomic_inc(&wq_attr->cq->usecnt); 2402 atomic_set(&wq->usecnt, 0); 2403 } 2404 return wq; 2405 } 2406 EXPORT_SYMBOL(ib_create_wq); 2407 2408 /** 2409 * ib_destroy_wq_user - Destroys the specified user WQ. 2410 * @wq: The WQ to destroy. 2411 * @udata: Valid user data 2412 */ 2413 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata) 2414 { 2415 struct ib_cq *cq = wq->cq; 2416 struct ib_pd *pd = wq->pd; 2417 int ret; 2418 2419 if (atomic_read(&wq->usecnt)) 2420 return -EBUSY; 2421 2422 ret = wq->device->ops.destroy_wq(wq, udata); 2423 if (ret) 2424 return ret; 2425 2426 atomic_dec(&pd->usecnt); 2427 atomic_dec(&cq->usecnt); 2428 return ret; 2429 } 2430 EXPORT_SYMBOL(ib_destroy_wq_user); 2431 2432 /** 2433 * ib_modify_wq - Modifies the specified WQ. 2434 * @wq: The WQ to modify. 2435 * @wq_attr: On input, specifies the WQ attributes to modify. 2436 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ 2437 * are being modified. 2438 * On output, the current values of selected WQ attributes are returned. 
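 *
 * Illustrative sketch (an assumption, not taken from this file): moving a
 * WQ that entered the error state back to ready:
 *
 *	struct ib_wq_attr attr = {
 *		.wq_state = IB_WQS_RDY,
 *		.curr_wq_state = IB_WQS_ERR,
 *	};
 *
 *	err = ib_modify_wq(wq, &attr, IB_WQ_STATE | IB_WQ_CUR_STATE);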
2439 */ 2440 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 2441 u32 wq_attr_mask) 2442 { 2443 int err; 2444 2445 if (!wq->device->ops.modify_wq) 2446 return -EOPNOTSUPP; 2447 2448 err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL); 2449 return err; 2450 } 2451 EXPORT_SYMBOL(ib_modify_wq); 2452 2453 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 2454 struct ib_mr_status *mr_status) 2455 { 2456 if (!mr->device->ops.check_mr_status) 2457 return -EOPNOTSUPP; 2458 2459 return mr->device->ops.check_mr_status(mr, check_mask, mr_status); 2460 } 2461 EXPORT_SYMBOL(ib_check_mr_status); 2462 2463 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2464 int state) 2465 { 2466 if (!device->ops.set_vf_link_state) 2467 return -EOPNOTSUPP; 2468 2469 return device->ops.set_vf_link_state(device, vf, port, state); 2470 } 2471 EXPORT_SYMBOL(ib_set_vf_link_state); 2472 2473 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2474 struct ifla_vf_info *info) 2475 { 2476 if (!device->ops.get_vf_config) 2477 return -EOPNOTSUPP; 2478 2479 return device->ops.get_vf_config(device, vf, port, info); 2480 } 2481 EXPORT_SYMBOL(ib_get_vf_config); 2482 2483 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2484 struct ifla_vf_stats *stats) 2485 { 2486 if (!device->ops.get_vf_stats) 2487 return -EOPNOTSUPP; 2488 2489 return device->ops.get_vf_stats(device, vf, port, stats); 2490 } 2491 EXPORT_SYMBOL(ib_get_vf_stats); 2492 2493 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2494 int type) 2495 { 2496 if (!device->ops.set_vf_guid) 2497 return -EOPNOTSUPP; 2498 2499 return device->ops.set_vf_guid(device, vf, port, guid, type); 2500 } 2501 EXPORT_SYMBOL(ib_set_vf_guid); 2502 2503 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 2504 struct ifla_vf_guid *node_guid, 2505 struct ifla_vf_guid *port_guid) 2506 { 2507 if (!device->ops.get_vf_guid) 2508 return -EOPNOTSUPP; 2509 2510 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); 2511 } 2512 EXPORT_SYMBOL(ib_get_vf_guid); 2513 /** 2514 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection 2515 * information) and set an appropriate memory region for registration. 2516 * @mr: memory region 2517 * @data_sg: dma mapped scatterlist for data 2518 * @data_sg_nents: number of entries in data_sg 2519 * @data_sg_offset: offset in bytes into data_sg 2520 * @meta_sg: dma mapped scatterlist for metadata 2521 * @meta_sg_nents: number of entries in meta_sg 2522 * @meta_sg_offset: offset in bytes into meta_sg 2523 * @page_size: page vector desired page size 2524 * 2525 * Constraints: 2526 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY. 2527 * 2528 * Return: 0 on success. 2529 * 2530 * After this completes successfully, the memory region 2531 * is ready for registration. 
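 *
 * Illustrative sketch (an assumption, not taken from this file; "data_sg",
 * "data_nents", "meta_sg" and "meta_nents" are hypothetical, DMA-mapped
 * caller-owned values):
 *
 *	n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
 *			    meta_sg, meta_nents, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;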
2532 */ 2533 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, 2534 int data_sg_nents, unsigned int *data_sg_offset, 2535 struct scatterlist *meta_sg, int meta_sg_nents, 2536 unsigned int *meta_sg_offset, unsigned int page_size) 2537 { 2538 if (unlikely(!mr->device->ops.map_mr_sg_pi || 2539 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) 2540 return -EOPNOTSUPP; 2541 2542 mr->page_size = page_size; 2543 2544 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, 2545 data_sg_offset, meta_sg, 2546 meta_sg_nents, meta_sg_offset); 2547 } 2548 EXPORT_SYMBOL(ib_map_mr_sg_pi); 2549 2550 /** 2551 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list 2552 * and set it to the memory region. 2553 * @mr: memory region 2554 * @sg: dma mapped scatterlist 2555 * @sg_nents: number of entries in sg 2556 * @sg_offset: offset in bytes into sg 2557 * @page_size: page vector desired page size 2558 * 2559 * Constraints: 2560 * 2561 * - The first sg element is allowed to have an offset. 2562 * - Each sg element must either be aligned to page_size or virtually 2563 * contiguous to the previous element. In case an sg element has a 2564 * non-contiguous offset, the mapping prefix will not include it. 2565 * - The last sg element is allowed to have length less than page_size. 2566 * - If sg_nents total byte length exceeds the MR's max_num_sg * page_size, 2567 * then only max_num_sg entries will be mapped. 2568 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these 2569 * constraints holds and the page_size argument is ignored. 2570 * 2571 * Returns the number of sg elements that were mapped to the memory region. 2572 * 2573 * After this completes successfully, the memory region 2574 * is ready for registration. 2575 */ 2576 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2577 unsigned int *sg_offset, unsigned int page_size) 2578 { 2579 if (unlikely(!mr->device->ops.map_mr_sg)) 2580 return -EOPNOTSUPP; 2581 2582 mr->page_size = page_size; 2583 2584 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); 2585 } 2586 EXPORT_SYMBOL(ib_map_mr_sg); 2587 2588 /** 2589 * ib_sg_to_pages() - Convert the largest prefix of a sg list 2590 * to a page vector 2591 * @mr: memory region 2592 * @sgl: dma mapped scatterlist 2593 * @sg_nents: number of entries in sg 2594 * @sg_offset_p: ==== ======================================================= 2595 * IN start offset in bytes into sg 2596 * OUT offset in bytes for element n of the sg of the first 2597 * byte that has not been processed where n is the return 2598 * value of this function. 2599 * ==== ======================================================= 2600 * @set_page: driver page assignment function pointer 2601 * 2602 * Core service helper for drivers to convert the largest 2603 * prefix of given sg list to a page vector. The sg list 2604 * prefix converted is the prefix that meets the requirements 2605 * of ib_map_mr_sg. 2606 * 2607 * Returns the number of sg elements that were assigned to 2608 * a page vector. 2609 */ 2610 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 2611 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) 2612 { 2613 struct scatterlist *sg; 2614 u64 last_end_dma_addr = 0; 2615 unsigned int sg_offset = sg_offset_p ? 
*sg_offset_p : 0; 2616 unsigned int last_page_off = 0; 2617 u64 page_mask = ~((u64)mr->page_size - 1); 2618 int i, ret; 2619 2620 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) 2621 return -EINVAL; 2622 2623 mr->iova = sg_dma_address(&sgl[0]) + sg_offset; 2624 mr->length = 0; 2625 2626 for_each_sg(sgl, sg, sg_nents, i) { 2627 u64 dma_addr = sg_dma_address(sg) + sg_offset; 2628 u64 prev_addr = dma_addr; 2629 unsigned int dma_len = sg_dma_len(sg) - sg_offset; 2630 u64 end_dma_addr = dma_addr + dma_len; 2631 u64 page_addr = dma_addr & page_mask; 2632 2633 /* 2634 * For the second and later elements, check whether either the 2635 * end of element i-1 or the start of element i is not aligned 2636 * on a page boundary. 2637 */ 2638 if (i && (last_page_off != 0 || page_addr != dma_addr)) { 2639 /* Stop mapping if there is a gap. */ 2640 if (last_end_dma_addr != dma_addr) 2641 break; 2642 2643 /* 2644 * Coalesce this element with the last. If it is small 2645 * enough just update mr->length. Otherwise start 2646 * mapping from the next page. 2647 */ 2648 goto next_page; 2649 } 2650 2651 do { 2652 ret = set_page(mr, page_addr); 2653 if (unlikely(ret < 0)) { 2654 sg_offset = prev_addr - sg_dma_address(sg); 2655 mr->length += prev_addr - dma_addr; 2656 if (sg_offset_p) 2657 *sg_offset_p = sg_offset; 2658 return i || sg_offset ? i : ret; 2659 } 2660 prev_addr = page_addr; 2661 next_page: 2662 page_addr += mr->page_size; 2663 } while (page_addr < end_dma_addr); 2664 2665 mr->length += dma_len; 2666 last_end_dma_addr = end_dma_addr; 2667 last_page_off = end_dma_addr & ~page_mask; 2668 2669 sg_offset = 0; 2670 } 2671 2672 if (sg_offset_p) 2673 *sg_offset_p = 0; 2674 return i; 2675 } 2676 EXPORT_SYMBOL(ib_sg_to_pages); 2677 2678 struct ib_drain_cqe { 2679 struct ib_cqe cqe; 2680 struct completion done; 2681 }; 2682 2683 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 2684 { 2685 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, 2686 cqe); 2687 2688 complete(&cqe->done); 2689 } 2690 2691 /* 2692 * Post a WR and block until its completion is reaped for the SQ. 2693 */ 2694 static void __ib_drain_sq(struct ib_qp *qp) 2695 { 2696 struct ib_cq *cq = qp->send_cq; 2697 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 2698 struct ib_drain_cqe sdrain; 2699 struct ib_rdma_wr swr = { 2700 .wr = { 2701 .next = NULL, 2702 { .wr_cqe = &sdrain.cqe, }, 2703 .opcode = IB_WR_RDMA_WRITE, 2704 }, 2705 }; 2706 int ret; 2707 2708 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 2709 if (ret) { 2710 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 2711 return; 2712 } 2713 2714 sdrain.cqe.done = ib_drain_qp_done; 2715 init_completion(&sdrain.done); 2716 2717 ret = ib_post_send(qp, &swr.wr, NULL); 2718 if (ret) { 2719 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 2720 return; 2721 } 2722 2723 if (cq->poll_ctx == IB_POLL_DIRECT) 2724 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0) 2725 ib_process_cq_direct(cq, -1); 2726 else 2727 wait_for_completion(&sdrain.done); 2728 } 2729 2730 /* 2731 * Post a WR and block until its completion is reaped for the RQ. 
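 * The QP is first moved to the error state so that the posted (empty)
 * receive WR completes with a flush status only after every receive posted
 * ahead of it has been flushed to the CQ.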
2732 */ 2733 static void __ib_drain_rq(struct ib_qp *qp) 2734 { 2735 struct ib_cq *cq = qp->recv_cq; 2736 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 2737 struct ib_drain_cqe rdrain; 2738 struct ib_recv_wr rwr = {}; 2739 int ret; 2740 2741 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 2742 if (ret) { 2743 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 2744 return; 2745 } 2746 2747 rwr.wr_cqe = &rdrain.cqe; 2748 rdrain.cqe.done = ib_drain_qp_done; 2749 init_completion(&rdrain.done); 2750 2751 ret = ib_post_recv(qp, &rwr, NULL); 2752 if (ret) { 2753 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 2754 return; 2755 } 2756 2757 if (cq->poll_ctx == IB_POLL_DIRECT) 2758 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0) 2759 ib_process_cq_direct(cq, -1); 2760 else 2761 wait_for_completion(&rdrain.done); 2762 } 2763 2764 /** 2765 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the 2766 * application. 2767 * @qp: queue pair to drain 2768 * 2769 * If the device has a provider-specific drain function, then 2770 * call that. Otherwise call the generic drain function 2771 * __ib_drain_sq(). 2772 * 2773 * The caller must: 2774 * 2775 * ensure there is room in the CQ and SQ for the drain work request and 2776 * completion. 2777 * 2778 * allocate the CQ using ib_alloc_cq(). 2779 * 2780 * ensure that there are no other contexts that are posting WRs concurrently. 2781 * Otherwise the drain is not guaranteed. 2782 */ 2783 void ib_drain_sq(struct ib_qp *qp) 2784 { 2785 if (qp->device->ops.drain_sq) 2786 qp->device->ops.drain_sq(qp); 2787 else 2788 __ib_drain_sq(qp); 2789 trace_cq_drain_complete(qp->send_cq); 2790 } 2791 EXPORT_SYMBOL(ib_drain_sq); 2792 2793 /** 2794 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the 2795 * application. 2796 * @qp: queue pair to drain 2797 * 2798 * If the device has a provider-specific drain function, then 2799 * call that. Otherwise call the generic drain function 2800 * __ib_drain_rq(). 2801 * 2802 * The caller must: 2803 * 2804 * ensure there is room in the CQ and RQ for the drain work request and 2805 * completion. 2806 * 2807 * allocate the CQ using ib_alloc_cq(). 2808 * 2809 * ensure that there are no other contexts that are posting WRs concurrently. 2810 * Otherwise the drain is not guaranteed. 2811 */ 2812 void ib_drain_rq(struct ib_qp *qp) 2813 { 2814 if (qp->device->ops.drain_rq) 2815 qp->device->ops.drain_rq(qp); 2816 else 2817 __ib_drain_rq(qp); 2818 trace_cq_drain_complete(qp->recv_cq); 2819 } 2820 EXPORT_SYMBOL(ib_drain_rq); 2821 2822 /** 2823 * ib_drain_qp() - Block until all CQEs have been consumed by the 2824 * application on both the RQ and SQ. 2825 * @qp: queue pair to drain 2826 * 2827 * The caller must: 2828 * 2829 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests 2830 * and completions. 2831 * 2832 * allocate the CQs using ib_alloc_cq(). 2833 * 2834 * ensure that there are no other contexts that are posting WRs concurrently. 2835 * Otherwise the drain is not guaranteed. 
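 *
 * Illustrative teardown sketch (an assumption, not taken from this file):
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);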
2836 */ 2837 void ib_drain_qp(struct ib_qp *qp) 2838 { 2839 ib_drain_sq(qp); 2840 if (!qp->srq) 2841 ib_drain_rq(qp); 2842 } 2843 EXPORT_SYMBOL(ib_drain_qp); 2844 2845 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, 2846 enum rdma_netdev_t type, const char *name, 2847 unsigned char name_assign_type, 2848 void (*setup)(struct net_device *)) 2849 { 2850 struct rdma_netdev_alloc_params params; 2851 struct net_device *netdev; 2852 int rc; 2853 2854 if (!device->ops.rdma_netdev_get_params) 2855 return ERR_PTR(-EOPNOTSUPP); 2856 2857 rc = device->ops.rdma_netdev_get_params(device, port_num, type, 2858 &params); 2859 if (rc) 2860 return ERR_PTR(rc); 2861 2862 netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type, 2863 setup, params.txqs, params.rxqs); 2864 if (!netdev) 2865 return ERR_PTR(-ENOMEM); 2866 2867 return netdev; 2868 } 2869 EXPORT_SYMBOL(rdma_alloc_netdev); 2870 2871 int rdma_init_netdev(struct ib_device *device, u8 port_num, 2872 enum rdma_netdev_t type, const char *name, 2873 unsigned char name_assign_type, 2874 void (*setup)(struct net_device *), 2875 struct net_device *netdev) 2876 { 2877 struct rdma_netdev_alloc_params params; 2878 int rc; 2879 2880 if (!device->ops.rdma_netdev_get_params) 2881 return -EOPNOTSUPP; 2882 2883 rc = device->ops.rdma_netdev_get_params(device, port_num, type, 2884 &params); 2885 if (rc) 2886 return rc; 2887 2888 return params.initialize_rdma_netdev(device, port_num, 2889 netdev, params.param); 2890 } 2891 EXPORT_SYMBOL(rdma_init_netdev); 2892 2893 void __rdma_block_iter_start(struct ib_block_iter *biter, 2894 struct scatterlist *sglist, unsigned int nents, 2895 unsigned long pgsz) 2896 { 2897 memset(biter, 0, sizeof(struct ib_block_iter)); 2898 biter->__sg = sglist; 2899 biter->__sg_nents = nents; 2900 2901 /* Driver provides best block size to use */ 2902 biter->__pg_bit = __fls(pgsz); 2903 } 2904 EXPORT_SYMBOL(__rdma_block_iter_start); 2905 2906 bool __rdma_block_iter_next(struct ib_block_iter *biter) 2907 { 2908 unsigned int block_offset; 2909 2910 if (!biter->__sg_nents || !biter->__sg) 2911 return false; 2912 2913 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; 2914 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); 2915 biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset; 2916 2917 if (biter->__sg_advance >= sg_dma_len(biter->__sg)) { 2918 biter->__sg_advance = 0; 2919 biter->__sg = sg_next(biter->__sg); 2920 biter->__sg_nents--; 2921 } 2922 2923 return true; 2924 } 2925 EXPORT_SYMBOL(__rdma_block_iter_next); 2926
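/*
 * Illustrative sketch (an assumption, not part of this file): a driver
 * walking a DMA-mapped scatterlist in blocks of its preferred page size
 * using the block iterator above. "sglist", "nents" and "pg_sz" are
 * hypothetical driver-owned values; rdma_block_iter_dma_address() is the
 * helper declared in <rdma/ib_verbs.h>.
 *
 *	struct ib_block_iter biter;
 *	u64 block;
 *
 *	__rdma_block_iter_start(&biter, sglist, nents, pg_sz);
 *	while (__rdma_block_iter_next(&biter)) {
 *		block = rdma_block_iter_dma_address(&biter);
 *		... hand "block" to the device's page-table programming code ...
 *	}
 */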