1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #include <linux/errno.h> 40 #include <linux/err.h> 41 #include <linux/export.h> 42 #include <linux/string.h> 43 #include <linux/slab.h> 44 #include <linux/in.h> 45 #include <linux/in6.h> 46 #include <net/addrconf.h> 47 48 #include <rdma/ib_verbs.h> 49 #include <rdma/ib_cache.h> 50 #include <rdma/ib_addr.h> 51 #include <rdma/rw.h> 52 53 #include "core_priv.h" 54 55 static const char * const ib_events[] = { 56 [IB_EVENT_CQ_ERR] = "CQ error", 57 [IB_EVENT_QP_FATAL] = "QP fatal error", 58 [IB_EVENT_QP_REQ_ERR] = "QP request error", 59 [IB_EVENT_QP_ACCESS_ERR] = "QP access error", 60 [IB_EVENT_COMM_EST] = "communication established", 61 [IB_EVENT_SQ_DRAINED] = "send queue drained", 62 [IB_EVENT_PATH_MIG] = "path migration successful", 63 [IB_EVENT_PATH_MIG_ERR] = "path migration error", 64 [IB_EVENT_DEVICE_FATAL] = "device fatal error", 65 [IB_EVENT_PORT_ACTIVE] = "port active", 66 [IB_EVENT_PORT_ERR] = "port error", 67 [IB_EVENT_LID_CHANGE] = "LID change", 68 [IB_EVENT_PKEY_CHANGE] = "P_key change", 69 [IB_EVENT_SM_CHANGE] = "SM change", 70 [IB_EVENT_SRQ_ERR] = "SRQ error", 71 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", 72 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", 73 [IB_EVENT_CLIENT_REREGISTER] = "client reregister", 74 [IB_EVENT_GID_CHANGE] = "GID changed", 75 }; 76 77 const char *__attribute_const__ ib_event_msg(enum ib_event_type event) 78 { 79 size_t index = event; 80 81 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
82 ib_events[index] : "unrecognized event"; 83 } 84 EXPORT_SYMBOL(ib_event_msg); 85 86 static const char * const wc_statuses[] = { 87 [IB_WC_SUCCESS] = "success", 88 [IB_WC_LOC_LEN_ERR] = "local length error", 89 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", 90 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", 91 [IB_WC_LOC_PROT_ERR] = "local protection error", 92 [IB_WC_WR_FLUSH_ERR] = "WR flushed", 93 [IB_WC_MW_BIND_ERR] = "memory management operation error", 94 [IB_WC_BAD_RESP_ERR] = "bad response error", 95 [IB_WC_LOC_ACCESS_ERR] = "local access error", 96 [IB_WC_REM_INV_REQ_ERR] = "invalid request error", 97 [IB_WC_REM_ACCESS_ERR] = "remote access error", 98 [IB_WC_REM_OP_ERR] = "remote operation error", 99 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", 100 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", 101 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", 102 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", 103 [IB_WC_REM_ABORT_ERR] = "operation aborted", 104 [IB_WC_INV_EECN_ERR] = "invalid EE context number", 105 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", 106 [IB_WC_FATAL_ERR] = "fatal error", 107 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", 108 [IB_WC_GENERAL_ERR] = "general error", 109 }; 110 111 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) 112 { 113 size_t index = status; 114 115 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? 116 wc_statuses[index] : "unrecognized status"; 117 } 118 EXPORT_SYMBOL(ib_wc_status_msg); 119 120 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) 121 { 122 switch (rate) { 123 case IB_RATE_2_5_GBPS: return 1; 124 case IB_RATE_5_GBPS: return 2; 125 case IB_RATE_10_GBPS: return 4; 126 case IB_RATE_20_GBPS: return 8; 127 case IB_RATE_30_GBPS: return 12; 128 case IB_RATE_40_GBPS: return 16; 129 case IB_RATE_60_GBPS: return 24; 130 case IB_RATE_80_GBPS: return 32; 131 case IB_RATE_120_GBPS: return 48; 132 default: return -1; 133 } 134 } 135 EXPORT_SYMBOL(ib_rate_to_mult); 136 137 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) 138 { 139 switch (mult) { 140 case 1: return IB_RATE_2_5_GBPS; 141 case 2: return IB_RATE_5_GBPS; 142 case 4: return IB_RATE_10_GBPS; 143 case 8: return IB_RATE_20_GBPS; 144 case 12: return IB_RATE_30_GBPS; 145 case 16: return IB_RATE_40_GBPS; 146 case 24: return IB_RATE_60_GBPS; 147 case 32: return IB_RATE_80_GBPS; 148 case 48: return IB_RATE_120_GBPS; 149 default: return IB_RATE_PORT_CURRENT; 150 } 151 } 152 EXPORT_SYMBOL(mult_to_ib_rate); 153 154 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) 155 { 156 switch (rate) { 157 case IB_RATE_2_5_GBPS: return 2500; 158 case IB_RATE_5_GBPS: return 5000; 159 case IB_RATE_10_GBPS: return 10000; 160 case IB_RATE_20_GBPS: return 20000; 161 case IB_RATE_30_GBPS: return 30000; 162 case IB_RATE_40_GBPS: return 40000; 163 case IB_RATE_60_GBPS: return 60000; 164 case IB_RATE_80_GBPS: return 80000; 165 case IB_RATE_120_GBPS: return 120000; 166 case IB_RATE_14_GBPS: return 14062; 167 case IB_RATE_56_GBPS: return 56250; 168 case IB_RATE_112_GBPS: return 112500; 169 case IB_RATE_168_GBPS: return 168750; 170 case IB_RATE_25_GBPS: return 25781; 171 case IB_RATE_100_GBPS: return 103125; 172 case IB_RATE_200_GBPS: return 206250; 173 case IB_RATE_300_GBPS: return 309375; 174 default: return -1; 175 } 176 } 177 EXPORT_SYMBOL(ib_rate_to_mbps); 178 179 __attribute_const__ enum rdma_transport_type 180 rdma_node_get_transport(enum rdma_node_type node_type) 181 { 
182 switch (node_type) { 183 case RDMA_NODE_IB_CA: 184 case RDMA_NODE_IB_SWITCH: 185 case RDMA_NODE_IB_ROUTER: 186 return RDMA_TRANSPORT_IB; 187 case RDMA_NODE_RNIC: 188 return RDMA_TRANSPORT_IWARP; 189 case RDMA_NODE_USNIC: 190 return RDMA_TRANSPORT_USNIC; 191 case RDMA_NODE_USNIC_UDP: 192 return RDMA_TRANSPORT_USNIC_UDP; 193 default: 194 BUG(); 195 return 0; 196 } 197 } 198 EXPORT_SYMBOL(rdma_node_get_transport); 199 200 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 201 { 202 if (device->get_link_layer) 203 return device->get_link_layer(device, port_num); 204 205 switch (rdma_node_get_transport(device->node_type)) { 206 case RDMA_TRANSPORT_IB: 207 return IB_LINK_LAYER_INFINIBAND; 208 case RDMA_TRANSPORT_IWARP: 209 case RDMA_TRANSPORT_USNIC: 210 case RDMA_TRANSPORT_USNIC_UDP: 211 return IB_LINK_LAYER_ETHERNET; 212 default: 213 return IB_LINK_LAYER_UNSPECIFIED; 214 } 215 } 216 EXPORT_SYMBOL(rdma_port_get_link_layer); 217 218 /* Protection domains */ 219 220 /** 221 * ib_alloc_pd - Allocates an unused protection domain. 222 * @device: The device on which to allocate the protection domain. 223 * 224 * A protection domain object provides an association between QPs, shared 225 * receive queues, address handles, memory regions, and memory windows. 226 * 227 * Every PD has a local_dma_lkey which can be used as the lkey value for local 228 * memory operations. 229 */ 230 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 231 const char *caller) 232 { 233 struct ib_pd *pd; 234 int mr_access_flags = 0; 235 236 pd = device->alloc_pd(device, NULL, NULL); 237 if (IS_ERR(pd)) 238 return pd; 239 240 pd->device = device; 241 pd->uobject = NULL; 242 pd->__internal_mr = NULL; 243 atomic_set(&pd->usecnt, 0); 244 pd->flags = flags; 245 246 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 247 pd->local_dma_lkey = device->local_dma_lkey; 248 else 249 mr_access_flags |= IB_ACCESS_LOCAL_WRITE; 250 251 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 252 pr_warn("%s: enabling unsafe global rkey\n", caller); 253 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; 254 } 255 256 if (mr_access_flags) { 257 struct ib_mr *mr; 258 259 mr = pd->device->get_dma_mr(pd, mr_access_flags); 260 if (IS_ERR(mr)) { 261 ib_dealloc_pd(pd); 262 return ERR_CAST(mr); 263 } 264 265 mr->device = pd->device; 266 mr->pd = pd; 267 mr->uobject = NULL; 268 mr->need_inval = false; 269 270 pd->__internal_mr = mr; 271 272 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) 273 pd->local_dma_lkey = pd->__internal_mr->lkey; 274 275 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) 276 pd->unsafe_global_rkey = pd->__internal_mr->rkey; 277 } 278 279 return pd; 280 } 281 EXPORT_SYMBOL(__ib_alloc_pd); 282 283 /** 284 * ib_dealloc_pd - Deallocates a protection domain. 285 * @pd: The protection domain to deallocate. 286 * 287 * It is an error to call this function while any resources in the pd still 288 * exist. The caller is responsible to synchronously destroy them and 289 * guarantee no new allocations will happen. 290 */ 291 void ib_dealloc_pd(struct ib_pd *pd) 292 { 293 int ret; 294 295 if (pd->__internal_mr) { 296 ret = pd->device->dereg_mr(pd->__internal_mr); 297 WARN_ON(ret); 298 pd->__internal_mr = NULL; 299 } 300 301 /* uverbs manipulates usecnt with proper locking, while the kabi 302 requires the caller to guarantee we can't race here. 
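Every QP, SRQ, AH, MR, FMR and WQ created on this PD takes a reference
on pd->usecnt, so a non-zero count here means the caller leaked one of
those objects before deallocating the PD.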
*/ 303 WARN_ON(atomic_read(&pd->usecnt)); 304 305 /* Making delalloc_pd a void return is a WIP, no driver should return 306 an error here. */ 307 ret = pd->device->dealloc_pd(pd); 308 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); 309 } 310 EXPORT_SYMBOL(ib_dealloc_pd); 311 312 /* Address handles */ 313 314 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) 315 { 316 struct ib_ah *ah; 317 318 ah = pd->device->create_ah(pd, ah_attr); 319 320 if (!IS_ERR(ah)) { 321 ah->device = pd->device; 322 ah->pd = pd; 323 ah->uobject = NULL; 324 atomic_inc(&pd->usecnt); 325 } 326 327 return ah; 328 } 329 EXPORT_SYMBOL(ib_create_ah); 330 331 static int ib_get_header_version(const union rdma_network_hdr *hdr) 332 { 333 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; 334 struct iphdr ip4h_checked; 335 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; 336 337 /* If it's IPv6, the version must be 6, otherwise, the first 338 * 20 bytes (before the IPv4 header) are garbled. 339 */ 340 if (ip6h->version != 6) 341 return (ip4h->version == 4) ? 4 : 0; 342 /* version may be 6 or 4 because the first 20 bytes could be garbled */ 343 344 /* RoCE v2 requires no options, thus header length 345 * must be 5 words 346 */ 347 if (ip4h->ihl != 5) 348 return 6; 349 350 /* Verify checksum. 351 * We can't write on scattered buffers so we need to copy to 352 * temp buffer. 353 */ 354 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); 355 ip4h_checked.check = 0; 356 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5); 357 /* if IPv4 header checksum is OK, believe it */ 358 if (ip4h->check == ip4h_checked.check) 359 return 4; 360 return 6; 361 } 362 363 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, 364 u8 port_num, 365 const struct ib_grh *grh) 366 { 367 int grh_version; 368 369 if (rdma_protocol_ib(device, port_num)) 370 return RDMA_NETWORK_IB; 371 372 grh_version = ib_get_header_version((union rdma_network_hdr *)grh); 373 374 if (grh_version == 4) 375 return RDMA_NETWORK_IPV4; 376 377 if (grh->next_hdr == IPPROTO_UDP) 378 return RDMA_NETWORK_IPV6; 379 380 return RDMA_NETWORK_ROCE_V1; 381 } 382 383 struct find_gid_index_context { 384 u16 vlan_id; 385 enum ib_gid_type gid_type; 386 }; 387 388 static bool find_gid_index(const union ib_gid *gid, 389 const struct ib_gid_attr *gid_attr, 390 void *context) 391 { 392 struct find_gid_index_context *ctx = 393 (struct find_gid_index_context *)context; 394 395 if (ctx->gid_type != gid_attr->gid_type) 396 return false; 397 398 if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) || 399 (is_vlan_dev(gid_attr->ndev) && 400 vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)) 401 return false; 402 403 return true; 404 } 405 406 static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num, 407 u16 vlan_id, const union ib_gid *sgid, 408 enum ib_gid_type gid_type, 409 u16 *gid_index) 410 { 411 struct find_gid_index_context context = {.vlan_id = vlan_id, 412 .gid_type = gid_type}; 413 414 return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index, 415 &context, gid_index); 416 } 417 418 static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr, 419 enum rdma_network_type net_type, 420 union ib_gid *sgid, union ib_gid *dgid) 421 { 422 struct sockaddr_in src_in; 423 struct sockaddr_in dst_in; 424 __be32 src_saddr, dst_saddr; 425 426 if (!sgid || !dgid) 427 return -EINVAL; 428 429 if (net_type == RDMA_NETWORK_IPV4) { 430 memcpy(&src_in.sin_addr.s_addr, 431 
&hdr->roce4grh.saddr, 4); 432 memcpy(&dst_in.sin_addr.s_addr, 433 &hdr->roce4grh.daddr, 4); 434 src_saddr = src_in.sin_addr.s_addr; 435 dst_saddr = dst_in.sin_addr.s_addr; 436 ipv6_addr_set_v4mapped(src_saddr, 437 (struct in6_addr *)sgid); 438 ipv6_addr_set_v4mapped(dst_saddr, 439 (struct in6_addr *)dgid); 440 return 0; 441 } else if (net_type == RDMA_NETWORK_IPV6 || 442 net_type == RDMA_NETWORK_IB) { 443 *dgid = hdr->ibgrh.dgid; 444 *sgid = hdr->ibgrh.sgid; 445 return 0; 446 } else { 447 return -EINVAL; 448 } 449 } 450 451 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 452 const struct ib_wc *wc, const struct ib_grh *grh, 453 struct ib_ah_attr *ah_attr) 454 { 455 u32 flow_class; 456 u16 gid_index; 457 int ret; 458 enum rdma_network_type net_type = RDMA_NETWORK_IB; 459 enum ib_gid_type gid_type = IB_GID_TYPE_IB; 460 int hoplimit = 0xff; 461 union ib_gid dgid; 462 union ib_gid sgid; 463 464 memset(ah_attr, 0, sizeof *ah_attr); 465 if (rdma_cap_eth_ah(device, port_num)) { 466 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) 467 net_type = wc->network_hdr_type; 468 else 469 net_type = ib_get_net_type_by_grh(device, port_num, grh); 470 gid_type = ib_network_to_gid_type(net_type); 471 } 472 ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, 473 &sgid, &dgid); 474 if (ret) 475 return ret; 476 477 if (rdma_protocol_roce(device, port_num)) { 478 int if_index = 0; 479 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? 480 wc->vlan_id : 0xffff; 481 struct net_device *idev; 482 struct net_device *resolved_dev; 483 484 if (!(wc->wc_flags & IB_WC_GRH)) 485 return -EPROTOTYPE; 486 487 if (!device->get_netdev) 488 return -EOPNOTSUPP; 489 490 idev = device->get_netdev(device, port_num); 491 if (!idev) 492 return -ENODEV; 493 494 ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, 495 ah_attr->dmac, 496 wc->wc_flags & IB_WC_WITH_VLAN ? 
497 NULL : &vlan_id, 498 &if_index, &hoplimit); 499 if (ret) { 500 dev_put(idev); 501 return ret; 502 } 503 504 resolved_dev = dev_get_by_index(&init_net, if_index); 505 if (resolved_dev->flags & IFF_LOOPBACK) { 506 dev_put(resolved_dev); 507 resolved_dev = idev; 508 dev_hold(resolved_dev); 509 } 510 rcu_read_lock(); 511 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev, 512 resolved_dev)) 513 ret = -EHOSTUNREACH; 514 rcu_read_unlock(); 515 dev_put(idev); 516 dev_put(resolved_dev); 517 if (ret) 518 return ret; 519 520 ret = get_sgid_index_from_eth(device, port_num, vlan_id, 521 &dgid, gid_type, &gid_index); 522 if (ret) 523 return ret; 524 } 525 526 ah_attr->dlid = wc->slid; 527 ah_attr->sl = wc->sl; 528 ah_attr->src_path_bits = wc->dlid_path_bits; 529 ah_attr->port_num = port_num; 530 531 if (wc->wc_flags & IB_WC_GRH) { 532 ah_attr->ah_flags = IB_AH_GRH; 533 ah_attr->grh.dgid = sgid; 534 535 if (!rdma_cap_eth_ah(device, port_num)) { 536 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { 537 ret = ib_find_cached_gid_by_port(device, &dgid, 538 IB_GID_TYPE_IB, 539 port_num, NULL, 540 &gid_index); 541 if (ret) 542 return ret; 543 } else { 544 gid_index = 0; 545 } 546 } 547 548 ah_attr->grh.sgid_index = (u8) gid_index; 549 flow_class = be32_to_cpu(grh->version_tclass_flow); 550 ah_attr->grh.flow_label = flow_class & 0xFFFFF; 551 ah_attr->grh.hop_limit = hoplimit; 552 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; 553 } 554 return 0; 555 } 556 EXPORT_SYMBOL(ib_init_ah_from_wc); 557 558 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 559 const struct ib_grh *grh, u8 port_num) 560 { 561 struct ib_ah_attr ah_attr; 562 int ret; 563 564 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); 565 if (ret) 566 return ERR_PTR(ret); 567 568 return ib_create_ah(pd, &ah_attr); 569 } 570 EXPORT_SYMBOL(ib_create_ah_from_wc); 571 572 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 573 { 574 return ah->device->modify_ah ? 575 ah->device->modify_ah(ah, ah_attr) : 576 -ENOSYS; 577 } 578 EXPORT_SYMBOL(ib_modify_ah); 579 580 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 581 { 582 return ah->device->query_ah ? 
583 ah->device->query_ah(ah, ah_attr) : 584 -ENOSYS; 585 } 586 EXPORT_SYMBOL(ib_query_ah); 587 588 int ib_destroy_ah(struct ib_ah *ah) 589 { 590 struct ib_pd *pd; 591 int ret; 592 593 pd = ah->pd; 594 ret = ah->device->destroy_ah(ah); 595 if (!ret) 596 atomic_dec(&pd->usecnt); 597 598 return ret; 599 } 600 EXPORT_SYMBOL(ib_destroy_ah); 601 602 /* Shared receive queues */ 603 604 struct ib_srq *ib_create_srq(struct ib_pd *pd, 605 struct ib_srq_init_attr *srq_init_attr) 606 { 607 struct ib_srq *srq; 608 609 if (!pd->device->create_srq) 610 return ERR_PTR(-ENOSYS); 611 612 srq = pd->device->create_srq(pd, srq_init_attr, NULL); 613 614 if (!IS_ERR(srq)) { 615 srq->device = pd->device; 616 srq->pd = pd; 617 srq->uobject = NULL; 618 srq->event_handler = srq_init_attr->event_handler; 619 srq->srq_context = srq_init_attr->srq_context; 620 srq->srq_type = srq_init_attr->srq_type; 621 if (srq->srq_type == IB_SRQT_XRC) { 622 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; 623 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; 624 atomic_inc(&srq->ext.xrc.xrcd->usecnt); 625 atomic_inc(&srq->ext.xrc.cq->usecnt); 626 } 627 atomic_inc(&pd->usecnt); 628 atomic_set(&srq->usecnt, 0); 629 } 630 631 return srq; 632 } 633 EXPORT_SYMBOL(ib_create_srq); 634 635 int ib_modify_srq(struct ib_srq *srq, 636 struct ib_srq_attr *srq_attr, 637 enum ib_srq_attr_mask srq_attr_mask) 638 { 639 return srq->device->modify_srq ? 640 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : 641 -ENOSYS; 642 } 643 EXPORT_SYMBOL(ib_modify_srq); 644 645 int ib_query_srq(struct ib_srq *srq, 646 struct ib_srq_attr *srq_attr) 647 { 648 return srq->device->query_srq ? 649 srq->device->query_srq(srq, srq_attr) : -ENOSYS; 650 } 651 EXPORT_SYMBOL(ib_query_srq); 652 653 int ib_destroy_srq(struct ib_srq *srq) 654 { 655 struct ib_pd *pd; 656 enum ib_srq_type srq_type; 657 struct ib_xrcd *uninitialized_var(xrcd); 658 struct ib_cq *uninitialized_var(cq); 659 int ret; 660 661 if (atomic_read(&srq->usecnt)) 662 return -EBUSY; 663 664 pd = srq->pd; 665 srq_type = srq->srq_type; 666 if (srq_type == IB_SRQT_XRC) { 667 xrcd = srq->ext.xrc.xrcd; 668 cq = srq->ext.xrc.cq; 669 } 670 671 ret = srq->device->destroy_srq(srq); 672 if (!ret) { 673 atomic_dec(&pd->usecnt); 674 if (srq_type == IB_SRQT_XRC) { 675 atomic_dec(&xrcd->usecnt); 676 atomic_dec(&cq->usecnt); 677 } 678 } 679 680 return ret; 681 } 682 EXPORT_SYMBOL(ib_destroy_srq); 683 684 /* Queue pairs */ 685 686 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) 687 { 688 struct ib_qp *qp = context; 689 unsigned long flags; 690 691 spin_lock_irqsave(&qp->device->event_handler_lock, flags); 692 list_for_each_entry(event->element.qp, &qp->open_list, open_list) 693 if (event->element.qp->event_handler) 694 event->element.qp->event_handler(event, event->element.qp->qp_context); 695 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); 696 } 697 698 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) 699 { 700 mutex_lock(&xrcd->tgt_qp_mutex); 701 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); 702 mutex_unlock(&xrcd->tgt_qp_mutex); 703 } 704 705 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, 706 void (*event_handler)(struct ib_event *, void *), 707 void *qp_context) 708 { 709 struct ib_qp *qp; 710 unsigned long flags; 711 712 qp = kzalloc(sizeof *qp, GFP_KERNEL); 713 if (!qp) 714 return ERR_PTR(-ENOMEM); 715 716 qp->real_qp = real_qp; 717 atomic_inc(&real_qp->usecnt); 718 qp->device = real_qp->device; 719 qp->event_handler = 
event_handler; 720 qp->qp_context = qp_context; 721 qp->qp_num = real_qp->qp_num; 722 qp->qp_type = real_qp->qp_type; 723 724 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 725 list_add(&qp->open_list, &real_qp->open_list); 726 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 727 728 return qp; 729 } 730 731 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 732 struct ib_qp_open_attr *qp_open_attr) 733 { 734 struct ib_qp *qp, *real_qp; 735 736 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) 737 return ERR_PTR(-EINVAL); 738 739 qp = ERR_PTR(-EINVAL); 740 mutex_lock(&xrcd->tgt_qp_mutex); 741 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { 742 if (real_qp->qp_num == qp_open_attr->qp_num) { 743 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, 744 qp_open_attr->qp_context); 745 break; 746 } 747 } 748 mutex_unlock(&xrcd->tgt_qp_mutex); 749 return qp; 750 } 751 EXPORT_SYMBOL(ib_open_qp); 752 753 static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, 754 struct ib_qp_init_attr *qp_init_attr) 755 { 756 struct ib_qp *real_qp = qp; 757 758 qp->event_handler = __ib_shared_qp_event_handler; 759 qp->qp_context = qp; 760 qp->pd = NULL; 761 qp->send_cq = qp->recv_cq = NULL; 762 qp->srq = NULL; 763 qp->xrcd = qp_init_attr->xrcd; 764 atomic_inc(&qp_init_attr->xrcd->usecnt); 765 INIT_LIST_HEAD(&qp->open_list); 766 767 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, 768 qp_init_attr->qp_context); 769 if (!IS_ERR(qp)) 770 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); 771 else 772 real_qp->device->destroy_qp(real_qp); 773 return qp; 774 } 775 776 struct ib_qp *ib_create_qp(struct ib_pd *pd, 777 struct ib_qp_init_attr *qp_init_attr) 778 { 779 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; 780 struct ib_qp *qp; 781 int ret; 782 783 if (qp_init_attr->rwq_ind_tbl && 784 (qp_init_attr->recv_cq || 785 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || 786 qp_init_attr->cap.max_recv_sge)) 787 return ERR_PTR(-EINVAL); 788 789 /* 790 * If the caller is using the RDMA API, calculate the resources 791 * needed for the RDMA READ/WRITE operations. 792 * 793 * Note that these callers need to pass in a port number.
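*
* A minimal, purely illustrative sketch of such a caller (pd, cq, port and
* queue_depth are placeholder names, not anything defined here); setting
* cap.max_rdma_ctxs is what opts the QP into the rdma_rw_* resource setup
* done below, and port_num must then be valid:
*
*	struct ib_qp_init_attr init_attr = { };
*
*	init_attr.qp_type = IB_QPT_RC;
*	init_attr.send_cq = cq;
*	init_attr.recv_cq = cq;
*	init_attr.cap.max_send_wr = queue_depth;
*	init_attr.cap.max_recv_wr = queue_depth;
*	init_attr.cap.max_rdma_ctxs = queue_depth;
*	init_attr.port_num = port;
*	qp = ib_create_qp(pd, &init_attr);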
794 */ 795 if (qp_init_attr->cap.max_rdma_ctxs) 796 rdma_rw_init_qp(device, qp_init_attr); 797 798 qp = device->create_qp(pd, qp_init_attr, NULL); 799 if (IS_ERR(qp)) 800 return qp; 801 802 qp->device = device; 803 qp->real_qp = qp; 804 qp->uobject = NULL; 805 qp->qp_type = qp_init_attr->qp_type; 806 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; 807 808 atomic_set(&qp->usecnt, 0); 809 qp->mrs_used = 0; 810 spin_lock_init(&qp->mr_lock); 811 INIT_LIST_HEAD(&qp->rdma_mrs); 812 INIT_LIST_HEAD(&qp->sig_mrs); 813 814 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) 815 return ib_create_xrc_qp(qp, qp_init_attr); 816 817 qp->event_handler = qp_init_attr->event_handler; 818 qp->qp_context = qp_init_attr->qp_context; 819 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { 820 qp->recv_cq = NULL; 821 qp->srq = NULL; 822 } else { 823 qp->recv_cq = qp_init_attr->recv_cq; 824 if (qp_init_attr->recv_cq) 825 atomic_inc(&qp_init_attr->recv_cq->usecnt); 826 qp->srq = qp_init_attr->srq; 827 if (qp->srq) 828 atomic_inc(&qp_init_attr->srq->usecnt); 829 } 830 831 qp->pd = pd; 832 qp->send_cq = qp_init_attr->send_cq; 833 qp->xrcd = NULL; 834 835 atomic_inc(&pd->usecnt); 836 if (qp_init_attr->send_cq) 837 atomic_inc(&qp_init_attr->send_cq->usecnt); 838 if (qp_init_attr->rwq_ind_tbl) 839 atomic_inc(&qp->rwq_ind_tbl->usecnt); 840 841 if (qp_init_attr->cap.max_rdma_ctxs) { 842 ret = rdma_rw_init_mrs(qp, qp_init_attr); 843 if (ret) { 844 pr_err("failed to init MR pool ret= %d\n", ret); 845 ib_destroy_qp(qp); 846 return ERR_PTR(ret); 847 } 848 } 849 850 /* 851 * Note: all hw drivers guarantee that max_send_sge is lower than 852 * the device RDMA WRITE SGE limit but not all hw drivers ensure that 853 * max_send_sge <= max_sge_rd. 854 */ 855 qp->max_write_sge = qp_init_attr->cap.max_send_sge; 856 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, 857 device->attrs.max_sge_rd); 858 859 return qp; 860 } 861 EXPORT_SYMBOL(ib_create_qp); 862 863 static const struct { 864 int valid; 865 enum ib_qp_attr_mask req_param[IB_QPT_MAX]; 866 enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; 867 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 868 [IB_QPS_RESET] = { 869 [IB_QPS_RESET] = { .valid = 1 }, 870 [IB_QPS_INIT] = { 871 .valid = 1, 872 .req_param = { 873 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 874 IB_QP_PORT | 875 IB_QP_QKEY), 876 [IB_QPT_RAW_PACKET] = IB_QP_PORT, 877 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 878 IB_QP_PORT | 879 IB_QP_ACCESS_FLAGS), 880 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 881 IB_QP_PORT | 882 IB_QP_ACCESS_FLAGS), 883 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 884 IB_QP_PORT | 885 IB_QP_ACCESS_FLAGS), 886 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 887 IB_QP_PORT | 888 IB_QP_ACCESS_FLAGS), 889 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 890 IB_QP_QKEY), 891 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 892 IB_QP_QKEY), 893 } 894 }, 895 }, 896 [IB_QPS_INIT] = { 897 [IB_QPS_RESET] = { .valid = 1 }, 898 [IB_QPS_ERR] = { .valid = 1 }, 899 [IB_QPS_INIT] = { 900 .valid = 1, 901 .opt_param = { 902 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 903 IB_QP_PORT | 904 IB_QP_QKEY), 905 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 906 IB_QP_PORT | 907 IB_QP_ACCESS_FLAGS), 908 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 909 IB_QP_PORT | 910 IB_QP_ACCESS_FLAGS), 911 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 912 IB_QP_PORT | 913 IB_QP_ACCESS_FLAGS), 914 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 915 IB_QP_PORT | 916 IB_QP_ACCESS_FLAGS), 917 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 918 IB_QP_QKEY), 919 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 920 IB_QP_QKEY), 921 } 922 }, 923 [IB_QPS_RTR] = { 924 .valid = 1, 925 .req_param 
= { 926 [IB_QPT_UC] = (IB_QP_AV | 927 IB_QP_PATH_MTU | 928 IB_QP_DEST_QPN | 929 IB_QP_RQ_PSN), 930 [IB_QPT_RC] = (IB_QP_AV | 931 IB_QP_PATH_MTU | 932 IB_QP_DEST_QPN | 933 IB_QP_RQ_PSN | 934 IB_QP_MAX_DEST_RD_ATOMIC | 935 IB_QP_MIN_RNR_TIMER), 936 [IB_QPT_XRC_INI] = (IB_QP_AV | 937 IB_QP_PATH_MTU | 938 IB_QP_DEST_QPN | 939 IB_QP_RQ_PSN), 940 [IB_QPT_XRC_TGT] = (IB_QP_AV | 941 IB_QP_PATH_MTU | 942 IB_QP_DEST_QPN | 943 IB_QP_RQ_PSN | 944 IB_QP_MAX_DEST_RD_ATOMIC | 945 IB_QP_MIN_RNR_TIMER), 946 }, 947 .opt_param = { 948 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 949 IB_QP_QKEY), 950 [IB_QPT_UC] = (IB_QP_ALT_PATH | 951 IB_QP_ACCESS_FLAGS | 952 IB_QP_PKEY_INDEX), 953 [IB_QPT_RC] = (IB_QP_ALT_PATH | 954 IB_QP_ACCESS_FLAGS | 955 IB_QP_PKEY_INDEX), 956 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | 957 IB_QP_ACCESS_FLAGS | 958 IB_QP_PKEY_INDEX), 959 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | 960 IB_QP_ACCESS_FLAGS | 961 IB_QP_PKEY_INDEX), 962 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 963 IB_QP_QKEY), 964 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 965 IB_QP_QKEY), 966 }, 967 }, 968 }, 969 [IB_QPS_RTR] = { 970 [IB_QPS_RESET] = { .valid = 1 }, 971 [IB_QPS_ERR] = { .valid = 1 }, 972 [IB_QPS_RTS] = { 973 .valid = 1, 974 .req_param = { 975 [IB_QPT_UD] = IB_QP_SQ_PSN, 976 [IB_QPT_UC] = IB_QP_SQ_PSN, 977 [IB_QPT_RC] = (IB_QP_TIMEOUT | 978 IB_QP_RETRY_CNT | 979 IB_QP_RNR_RETRY | 980 IB_QP_SQ_PSN | 981 IB_QP_MAX_QP_RD_ATOMIC), 982 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | 983 IB_QP_RETRY_CNT | 984 IB_QP_RNR_RETRY | 985 IB_QP_SQ_PSN | 986 IB_QP_MAX_QP_RD_ATOMIC), 987 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | 988 IB_QP_SQ_PSN), 989 [IB_QPT_SMI] = IB_QP_SQ_PSN, 990 [IB_QPT_GSI] = IB_QP_SQ_PSN, 991 }, 992 .opt_param = { 993 [IB_QPT_UD] = (IB_QP_CUR_STATE | 994 IB_QP_QKEY), 995 [IB_QPT_UC] = (IB_QP_CUR_STATE | 996 IB_QP_ALT_PATH | 997 IB_QP_ACCESS_FLAGS | 998 IB_QP_PATH_MIG_STATE), 999 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1000 IB_QP_ALT_PATH | 1001 IB_QP_ACCESS_FLAGS | 1002 IB_QP_MIN_RNR_TIMER | 1003 IB_QP_PATH_MIG_STATE), 1004 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1005 IB_QP_ALT_PATH | 1006 IB_QP_ACCESS_FLAGS | 1007 IB_QP_PATH_MIG_STATE), 1008 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1009 IB_QP_ALT_PATH | 1010 IB_QP_ACCESS_FLAGS | 1011 IB_QP_MIN_RNR_TIMER | 1012 IB_QP_PATH_MIG_STATE), 1013 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1014 IB_QP_QKEY), 1015 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1016 IB_QP_QKEY), 1017 } 1018 } 1019 }, 1020 [IB_QPS_RTS] = { 1021 [IB_QPS_RESET] = { .valid = 1 }, 1022 [IB_QPS_ERR] = { .valid = 1 }, 1023 [IB_QPS_RTS] = { 1024 .valid = 1, 1025 .opt_param = { 1026 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1027 IB_QP_QKEY), 1028 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1029 IB_QP_ACCESS_FLAGS | 1030 IB_QP_ALT_PATH | 1031 IB_QP_PATH_MIG_STATE), 1032 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1033 IB_QP_ACCESS_FLAGS | 1034 IB_QP_ALT_PATH | 1035 IB_QP_PATH_MIG_STATE | 1036 IB_QP_MIN_RNR_TIMER), 1037 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1038 IB_QP_ACCESS_FLAGS | 1039 IB_QP_ALT_PATH | 1040 IB_QP_PATH_MIG_STATE), 1041 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1042 IB_QP_ACCESS_FLAGS | 1043 IB_QP_ALT_PATH | 1044 IB_QP_PATH_MIG_STATE | 1045 IB_QP_MIN_RNR_TIMER), 1046 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1047 IB_QP_QKEY), 1048 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1049 IB_QP_QKEY), 1050 } 1051 }, 1052 [IB_QPS_SQD] = { 1053 .valid = 1, 1054 .opt_param = { 1055 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1056 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1057 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1058 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1059 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? 
*/ 1060 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1061 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY 1062 } 1063 }, 1064 }, 1065 [IB_QPS_SQD] = { 1066 [IB_QPS_RESET] = { .valid = 1 }, 1067 [IB_QPS_ERR] = { .valid = 1 }, 1068 [IB_QPS_RTS] = { 1069 .valid = 1, 1070 .opt_param = { 1071 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1072 IB_QP_QKEY), 1073 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1074 IB_QP_ALT_PATH | 1075 IB_QP_ACCESS_FLAGS | 1076 IB_QP_PATH_MIG_STATE), 1077 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1078 IB_QP_ALT_PATH | 1079 IB_QP_ACCESS_FLAGS | 1080 IB_QP_MIN_RNR_TIMER | 1081 IB_QP_PATH_MIG_STATE), 1082 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1083 IB_QP_ALT_PATH | 1084 IB_QP_ACCESS_FLAGS | 1085 IB_QP_PATH_MIG_STATE), 1086 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1087 IB_QP_ALT_PATH | 1088 IB_QP_ACCESS_FLAGS | 1089 IB_QP_MIN_RNR_TIMER | 1090 IB_QP_PATH_MIG_STATE), 1091 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1092 IB_QP_QKEY), 1093 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1094 IB_QP_QKEY), 1095 } 1096 }, 1097 [IB_QPS_SQD] = { 1098 .valid = 1, 1099 .opt_param = { 1100 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1101 IB_QP_QKEY), 1102 [IB_QPT_UC] = (IB_QP_AV | 1103 IB_QP_ALT_PATH | 1104 IB_QP_ACCESS_FLAGS | 1105 IB_QP_PKEY_INDEX | 1106 IB_QP_PATH_MIG_STATE), 1107 [IB_QPT_RC] = (IB_QP_PORT | 1108 IB_QP_AV | 1109 IB_QP_TIMEOUT | 1110 IB_QP_RETRY_CNT | 1111 IB_QP_RNR_RETRY | 1112 IB_QP_MAX_QP_RD_ATOMIC | 1113 IB_QP_MAX_DEST_RD_ATOMIC | 1114 IB_QP_ALT_PATH | 1115 IB_QP_ACCESS_FLAGS | 1116 IB_QP_PKEY_INDEX | 1117 IB_QP_MIN_RNR_TIMER | 1118 IB_QP_PATH_MIG_STATE), 1119 [IB_QPT_XRC_INI] = (IB_QP_PORT | 1120 IB_QP_AV | 1121 IB_QP_TIMEOUT | 1122 IB_QP_RETRY_CNT | 1123 IB_QP_RNR_RETRY | 1124 IB_QP_MAX_QP_RD_ATOMIC | 1125 IB_QP_ALT_PATH | 1126 IB_QP_ACCESS_FLAGS | 1127 IB_QP_PKEY_INDEX | 1128 IB_QP_PATH_MIG_STATE), 1129 [IB_QPT_XRC_TGT] = (IB_QP_PORT | 1130 IB_QP_AV | 1131 IB_QP_TIMEOUT | 1132 IB_QP_MAX_DEST_RD_ATOMIC | 1133 IB_QP_ALT_PATH | 1134 IB_QP_ACCESS_FLAGS | 1135 IB_QP_PKEY_INDEX | 1136 IB_QP_MIN_RNR_TIMER | 1137 IB_QP_PATH_MIG_STATE), 1138 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1139 IB_QP_QKEY), 1140 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1141 IB_QP_QKEY), 1142 } 1143 } 1144 }, 1145 [IB_QPS_SQE] = { 1146 [IB_QPS_RESET] = { .valid = 1 }, 1147 [IB_QPS_ERR] = { .valid = 1 }, 1148 [IB_QPS_RTS] = { 1149 .valid = 1, 1150 .opt_param = { 1151 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1152 IB_QP_QKEY), 1153 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1154 IB_QP_ACCESS_FLAGS), 1155 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1156 IB_QP_QKEY), 1157 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1158 IB_QP_QKEY), 1159 } 1160 } 1161 }, 1162 [IB_QPS_ERR] = { 1163 [IB_QPS_RESET] = { .valid = 1 }, 1164 [IB_QPS_ERR] = { .valid = 1 } 1165 } 1166 }; 1167 1168 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1169 enum ib_qp_type type, enum ib_qp_attr_mask mask, 1170 enum rdma_link_layer ll) 1171 { 1172 enum ib_qp_attr_mask req_param, opt_param; 1173 1174 if (cur_state < 0 || cur_state > IB_QPS_ERR || 1175 next_state < 0 || next_state > IB_QPS_ERR) 1176 return 0; 1177 1178 if (mask & IB_QP_CUR_STATE && 1179 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && 1180 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) 1181 return 0; 1182 1183 if (!qp_state_table[cur_state][next_state].valid) 1184 return 0; 1185 1186 req_param = qp_state_table[cur_state][next_state].req_param[type]; 1187 opt_param = qp_state_table[cur_state][next_state].opt_param[type]; 1188 1189 if ((mask & req_param) != req_param) 1190 return 0; 1191 1192 if (mask & ~(req_param | opt_param | IB_QP_STATE)) 1193 return 
0; 1194 1195 return 1; 1196 } 1197 EXPORT_SYMBOL(ib_modify_qp_is_ok); 1198 1199 int ib_resolve_eth_dmac(struct ib_qp *qp, 1200 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 1201 { 1202 int ret = 0; 1203 1204 if (*qp_attr_mask & IB_QP_AV) { 1205 if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) || 1206 qp_attr->ah_attr.port_num > rdma_end_port(qp->device)) 1207 return -EINVAL; 1208 1209 if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num)) 1210 return 0; 1211 1212 if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) { 1213 rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, 1214 qp_attr->ah_attr.dmac); 1215 } else { 1216 union ib_gid sgid; 1217 struct ib_gid_attr sgid_attr; 1218 int ifindex; 1219 int hop_limit; 1220 1221 ret = ib_query_gid(qp->device, 1222 qp_attr->ah_attr.port_num, 1223 qp_attr->ah_attr.grh.sgid_index, 1224 &sgid, &sgid_attr); 1225 1226 if (ret || !sgid_attr.ndev) { 1227 if (!ret) 1228 ret = -ENXIO; 1229 goto out; 1230 } 1231 1232 ifindex = sgid_attr.ndev->ifindex; 1233 1234 ret = rdma_addr_find_l2_eth_by_grh(&sgid, 1235 &qp_attr->ah_attr.grh.dgid, 1236 qp_attr->ah_attr.dmac, 1237 NULL, &ifindex, &hop_limit); 1238 1239 dev_put(sgid_attr.ndev); 1240 1241 qp_attr->ah_attr.grh.hop_limit = hop_limit; 1242 } 1243 } 1244 out: 1245 return ret; 1246 } 1247 EXPORT_SYMBOL(ib_resolve_eth_dmac); 1248 1249 1250 int ib_modify_qp(struct ib_qp *qp, 1251 struct ib_qp_attr *qp_attr, 1252 int qp_attr_mask) 1253 { 1254 int ret; 1255 1256 ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask); 1257 if (ret) 1258 return ret; 1259 1260 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); 1261 } 1262 EXPORT_SYMBOL(ib_modify_qp); 1263 1264 int ib_query_qp(struct ib_qp *qp, 1265 struct ib_qp_attr *qp_attr, 1266 int qp_attr_mask, 1267 struct ib_qp_init_attr *qp_init_attr) 1268 { 1269 return qp->device->query_qp ? 
1270 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : 1271 -ENOSYS; 1272 } 1273 EXPORT_SYMBOL(ib_query_qp); 1274 1275 int ib_close_qp(struct ib_qp *qp) 1276 { 1277 struct ib_qp *real_qp; 1278 unsigned long flags; 1279 1280 real_qp = qp->real_qp; 1281 if (real_qp == qp) 1282 return -EINVAL; 1283 1284 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 1285 list_del(&qp->open_list); 1286 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 1287 1288 atomic_dec(&real_qp->usecnt); 1289 kfree(qp); 1290 1291 return 0; 1292 } 1293 EXPORT_SYMBOL(ib_close_qp); 1294 1295 static int __ib_destroy_shared_qp(struct ib_qp *qp) 1296 { 1297 struct ib_xrcd *xrcd; 1298 struct ib_qp *real_qp; 1299 int ret; 1300 1301 real_qp = qp->real_qp; 1302 xrcd = real_qp->xrcd; 1303 1304 mutex_lock(&xrcd->tgt_qp_mutex); 1305 ib_close_qp(qp); 1306 if (atomic_read(&real_qp->usecnt) == 0) 1307 list_del(&real_qp->xrcd_list); 1308 else 1309 real_qp = NULL; 1310 mutex_unlock(&xrcd->tgt_qp_mutex); 1311 1312 if (real_qp) { 1313 ret = ib_destroy_qp(real_qp); 1314 if (!ret) 1315 atomic_dec(&xrcd->usecnt); 1316 else 1317 __ib_insert_xrcd_qp(xrcd, real_qp); 1318 } 1319 1320 return 0; 1321 } 1322 1323 int ib_destroy_qp(struct ib_qp *qp) 1324 { 1325 struct ib_pd *pd; 1326 struct ib_cq *scq, *rcq; 1327 struct ib_srq *srq; 1328 struct ib_rwq_ind_table *ind_tbl; 1329 int ret; 1330 1331 WARN_ON_ONCE(qp->mrs_used > 0); 1332 1333 if (atomic_read(&qp->usecnt)) 1334 return -EBUSY; 1335 1336 if (qp->real_qp != qp) 1337 return __ib_destroy_shared_qp(qp); 1338 1339 pd = qp->pd; 1340 scq = qp->send_cq; 1341 rcq = qp->recv_cq; 1342 srq = qp->srq; 1343 ind_tbl = qp->rwq_ind_tbl; 1344 1345 if (!qp->uobject) 1346 rdma_rw_cleanup_mrs(qp); 1347 1348 ret = qp->device->destroy_qp(qp); 1349 if (!ret) { 1350 if (pd) 1351 atomic_dec(&pd->usecnt); 1352 if (scq) 1353 atomic_dec(&scq->usecnt); 1354 if (rcq) 1355 atomic_dec(&rcq->usecnt); 1356 if (srq) 1357 atomic_dec(&srq->usecnt); 1358 if (ind_tbl) 1359 atomic_dec(&ind_tbl->usecnt); 1360 } 1361 1362 return ret; 1363 } 1364 EXPORT_SYMBOL(ib_destroy_qp); 1365 1366 /* Completion queues */ 1367 1368 struct ib_cq *ib_create_cq(struct ib_device *device, 1369 ib_comp_handler comp_handler, 1370 void (*event_handler)(struct ib_event *, void *), 1371 void *cq_context, 1372 const struct ib_cq_init_attr *cq_attr) 1373 { 1374 struct ib_cq *cq; 1375 1376 cq = device->create_cq(device, cq_attr, NULL, NULL); 1377 1378 if (!IS_ERR(cq)) { 1379 cq->device = device; 1380 cq->uobject = NULL; 1381 cq->comp_handler = comp_handler; 1382 cq->event_handler = event_handler; 1383 cq->cq_context = cq_context; 1384 atomic_set(&cq->usecnt, 0); 1385 } 1386 1387 return cq; 1388 } 1389 EXPORT_SYMBOL(ib_create_cq); 1390 1391 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) 1392 { 1393 return cq->device->modify_cq ? 1394 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS; 1395 } 1396 EXPORT_SYMBOL(ib_modify_cq); 1397 1398 int ib_destroy_cq(struct ib_cq *cq) 1399 { 1400 if (atomic_read(&cq->usecnt)) 1401 return -EBUSY; 1402 1403 return cq->device->destroy_cq(cq); 1404 } 1405 EXPORT_SYMBOL(ib_destroy_cq); 1406 1407 int ib_resize_cq(struct ib_cq *cq, int cqe) 1408 { 1409 return cq->device->resize_cq ? 
1410 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; 1411 } 1412 EXPORT_SYMBOL(ib_resize_cq); 1413 1414 /* Memory regions */ 1415 1416 int ib_dereg_mr(struct ib_mr *mr) 1417 { 1418 struct ib_pd *pd = mr->pd; 1419 int ret; 1420 1421 ret = mr->device->dereg_mr(mr); 1422 if (!ret) 1423 atomic_dec(&pd->usecnt); 1424 1425 return ret; 1426 } 1427 EXPORT_SYMBOL(ib_dereg_mr); 1428 1429 /** 1430 * ib_alloc_mr() - Allocates a memory region 1431 * @pd: protection domain associated with the region 1432 * @mr_type: memory region type 1433 * @max_num_sg: maximum sg entries available for registration. 1434 * 1435 * Notes: 1436 * Memory registeration page/sg lists must not exceed max_num_sg. 1437 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed 1438 * max_num_sg * used_page_size. 1439 * 1440 */ 1441 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 1442 enum ib_mr_type mr_type, 1443 u32 max_num_sg) 1444 { 1445 struct ib_mr *mr; 1446 1447 if (!pd->device->alloc_mr) 1448 return ERR_PTR(-ENOSYS); 1449 1450 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); 1451 if (!IS_ERR(mr)) { 1452 mr->device = pd->device; 1453 mr->pd = pd; 1454 mr->uobject = NULL; 1455 atomic_inc(&pd->usecnt); 1456 mr->need_inval = false; 1457 } 1458 1459 return mr; 1460 } 1461 EXPORT_SYMBOL(ib_alloc_mr); 1462 1463 /* "Fast" memory regions */ 1464 1465 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 1466 int mr_access_flags, 1467 struct ib_fmr_attr *fmr_attr) 1468 { 1469 struct ib_fmr *fmr; 1470 1471 if (!pd->device->alloc_fmr) 1472 return ERR_PTR(-ENOSYS); 1473 1474 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); 1475 if (!IS_ERR(fmr)) { 1476 fmr->device = pd->device; 1477 fmr->pd = pd; 1478 atomic_inc(&pd->usecnt); 1479 } 1480 1481 return fmr; 1482 } 1483 EXPORT_SYMBOL(ib_alloc_fmr); 1484 1485 int ib_unmap_fmr(struct list_head *fmr_list) 1486 { 1487 struct ib_fmr *fmr; 1488 1489 if (list_empty(fmr_list)) 1490 return 0; 1491 1492 fmr = list_entry(fmr_list->next, struct ib_fmr, list); 1493 return fmr->device->unmap_fmr(fmr_list); 1494 } 1495 EXPORT_SYMBOL(ib_unmap_fmr); 1496 1497 int ib_dealloc_fmr(struct ib_fmr *fmr) 1498 { 1499 struct ib_pd *pd; 1500 int ret; 1501 1502 pd = fmr->pd; 1503 ret = fmr->device->dealloc_fmr(fmr); 1504 if (!ret) 1505 atomic_dec(&pd->usecnt); 1506 1507 return ret; 1508 } 1509 EXPORT_SYMBOL(ib_dealloc_fmr); 1510 1511 /* Multicast groups */ 1512 1513 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1514 { 1515 int ret; 1516 1517 if (!qp->device->attach_mcast) 1518 return -ENOSYS; 1519 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 1520 return -EINVAL; 1521 1522 ret = qp->device->attach_mcast(qp, gid, lid); 1523 if (!ret) 1524 atomic_inc(&qp->usecnt); 1525 return ret; 1526 } 1527 EXPORT_SYMBOL(ib_attach_mcast); 1528 1529 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1530 { 1531 int ret; 1532 1533 if (!qp->device->detach_mcast) 1534 return -ENOSYS; 1535 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 1536 return -EINVAL; 1537 1538 ret = qp->device->detach_mcast(qp, gid, lid); 1539 if (!ret) 1540 atomic_dec(&qp->usecnt); 1541 return ret; 1542 } 1543 EXPORT_SYMBOL(ib_detach_mcast); 1544 1545 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) 1546 { 1547 struct ib_xrcd *xrcd; 1548 1549 if (!device->alloc_xrcd) 1550 return ERR_PTR(-ENOSYS); 1551 1552 xrcd = device->alloc_xrcd(device, NULL, NULL); 1553 if (!IS_ERR(xrcd)) { 1554 xrcd->device = device; 1555 xrcd->inode = NULL; 1556 atomic_set(&xrcd->usecnt, 0); 1557 mutex_init(&xrcd->tgt_qp_mutex); 
1558 INIT_LIST_HEAD(&xrcd->tgt_qp_list); 1559 } 1560 1561 return xrcd; 1562 } 1563 EXPORT_SYMBOL(ib_alloc_xrcd); 1564 1565 int ib_dealloc_xrcd(struct ib_xrcd *xrcd) 1566 { 1567 struct ib_qp *qp; 1568 int ret; 1569 1570 if (atomic_read(&xrcd->usecnt)) 1571 return -EBUSY; 1572 1573 while (!list_empty(&xrcd->tgt_qp_list)) { 1574 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); 1575 ret = ib_destroy_qp(qp); 1576 if (ret) 1577 return ret; 1578 } 1579 1580 return xrcd->device->dealloc_xrcd(xrcd); 1581 } 1582 EXPORT_SYMBOL(ib_dealloc_xrcd); 1583 1584 /** 1585 * ib_create_wq - Creates a WQ associated with the specified protection 1586 * domain. 1587 * @pd: The protection domain associated with the WQ. 1588 * @wq_init_attr: A list of initial attributes required to create the 1589 * WQ. If WQ creation succeeds, then the attributes are updated to 1590 * the actual capabilities of the created WQ. 1591 * 1592 * wq_init_attr->max_wr and wq_init_attr->max_sge determine 1593 * the requested size of the WQ, and set to the actual values allocated 1594 * on return. 1595 * If ib_create_wq() succeeds, then max_wr and max_sge will always be 1596 * at least as large as the requested values. 1597 */ 1598 struct ib_wq *ib_create_wq(struct ib_pd *pd, 1599 struct ib_wq_init_attr *wq_attr) 1600 { 1601 struct ib_wq *wq; 1602 1603 if (!pd->device->create_wq) 1604 return ERR_PTR(-ENOSYS); 1605 1606 wq = pd->device->create_wq(pd, wq_attr, NULL); 1607 if (!IS_ERR(wq)) { 1608 wq->event_handler = wq_attr->event_handler; 1609 wq->wq_context = wq_attr->wq_context; 1610 wq->wq_type = wq_attr->wq_type; 1611 wq->cq = wq_attr->cq; 1612 wq->device = pd->device; 1613 wq->pd = pd; 1614 wq->uobject = NULL; 1615 atomic_inc(&pd->usecnt); 1616 atomic_inc(&wq_attr->cq->usecnt); 1617 atomic_set(&wq->usecnt, 0); 1618 } 1619 return wq; 1620 } 1621 EXPORT_SYMBOL(ib_create_wq); 1622 1623 /** 1624 * ib_destroy_wq - Destroys the specified WQ. 1625 * @wq: The WQ to destroy. 1626 */ 1627 int ib_destroy_wq(struct ib_wq *wq) 1628 { 1629 int err; 1630 struct ib_cq *cq = wq->cq; 1631 struct ib_pd *pd = wq->pd; 1632 1633 if (atomic_read(&wq->usecnt)) 1634 return -EBUSY; 1635 1636 err = wq->device->destroy_wq(wq); 1637 if (!err) { 1638 atomic_dec(&pd->usecnt); 1639 atomic_dec(&cq->usecnt); 1640 } 1641 return err; 1642 } 1643 EXPORT_SYMBOL(ib_destroy_wq); 1644 1645 /** 1646 * ib_modify_wq - Modifies the specified WQ. 1647 * @wq: The WQ to modify. 1648 * @wq_attr: On input, specifies the WQ attributes to modify. 1649 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ 1650 * are being modified. 1651 * On output, the current values of selected WQ attributes are returned. 1652 */ 1653 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 1654 u32 wq_attr_mask) 1655 { 1656 int err; 1657 1658 if (!wq->device->modify_wq) 1659 return -ENOSYS; 1660 1661 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); 1662 return err; 1663 } 1664 EXPORT_SYMBOL(ib_modify_wq); 1665 1666 /* 1667 * ib_create_rwq_ind_table - Creates a RQ Indirection Table. 1668 * @device: The device on which to create the rwq indirection table. 1669 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to 1670 * create the Indirection Table. 1671 * 1672 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less 1673 * than the created ib_rwq_ind_table object and the caller is responsible 1674 * for its memory allocation/free. 
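*
* A rough usage sketch (wqs[] and nr_wqs are illustrative names only; the
* table size is 1 << log_ind_tbl_size, so nr_wqs must be a power of two):
*
*	struct ib_rwq_ind_table_init_attr init_attr = { };
*
*	init_attr.log_ind_tbl_size = ilog2(nr_wqs);
*	init_attr.ind_tbl = wqs;
*	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
*
* The wqs array must stay allocated for as long as the returned table
* exists, per the note above.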
1675 */ 1676 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 1677 struct ib_rwq_ind_table_init_attr *init_attr) 1678 { 1679 struct ib_rwq_ind_table *rwq_ind_table; 1680 int i; 1681 u32 table_size; 1682 1683 if (!device->create_rwq_ind_table) 1684 return ERR_PTR(-ENOSYS); 1685 1686 table_size = (1 << init_attr->log_ind_tbl_size); 1687 rwq_ind_table = device->create_rwq_ind_table(device, 1688 init_attr, NULL); 1689 if (IS_ERR(rwq_ind_table)) 1690 return rwq_ind_table; 1691 1692 rwq_ind_table->ind_tbl = init_attr->ind_tbl; 1693 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; 1694 rwq_ind_table->device = device; 1695 rwq_ind_table->uobject = NULL; 1696 atomic_set(&rwq_ind_table->usecnt, 0); 1697 1698 for (i = 0; i < table_size; i++) 1699 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); 1700 1701 return rwq_ind_table; 1702 } 1703 EXPORT_SYMBOL(ib_create_rwq_ind_table); 1704 1705 /* 1706 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. 1707 * @wq_ind_table: The Indirection Table to destroy. 1708 */ 1709 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) 1710 { 1711 int err, i; 1712 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); 1713 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; 1714 1715 if (atomic_read(&rwq_ind_table->usecnt)) 1716 return -EBUSY; 1717 1718 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); 1719 if (!err) { 1720 for (i = 0; i < table_size; i++) 1721 atomic_dec(&ind_tbl[i]->usecnt); 1722 } 1723 1724 return err; 1725 } 1726 EXPORT_SYMBOL(ib_destroy_rwq_ind_table); 1727 1728 struct ib_flow *ib_create_flow(struct ib_qp *qp, 1729 struct ib_flow_attr *flow_attr, 1730 int domain) 1731 { 1732 struct ib_flow *flow_id; 1733 if (!qp->device->create_flow) 1734 return ERR_PTR(-ENOSYS); 1735 1736 flow_id = qp->device->create_flow(qp, flow_attr, domain); 1737 if (!IS_ERR(flow_id)) 1738 atomic_inc(&qp->usecnt); 1739 return flow_id; 1740 } 1741 EXPORT_SYMBOL(ib_create_flow); 1742 1743 int ib_destroy_flow(struct ib_flow *flow_id) 1744 { 1745 int err; 1746 struct ib_qp *qp = flow_id->qp; 1747 1748 err = qp->device->destroy_flow(flow_id); 1749 if (!err) 1750 atomic_dec(&qp->usecnt); 1751 return err; 1752 } 1753 EXPORT_SYMBOL(ib_destroy_flow); 1754 1755 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 1756 struct ib_mr_status *mr_status) 1757 { 1758 return mr->device->check_mr_status ? 
1759 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS; 1760 } 1761 EXPORT_SYMBOL(ib_check_mr_status); 1762 1763 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 1764 int state) 1765 { 1766 if (!device->set_vf_link_state) 1767 return -ENOSYS; 1768 1769 return device->set_vf_link_state(device, vf, port, state); 1770 } 1771 EXPORT_SYMBOL(ib_set_vf_link_state); 1772 1773 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 1774 struct ifla_vf_info *info) 1775 { 1776 if (!device->get_vf_config) 1777 return -ENOSYS; 1778 1779 return device->get_vf_config(device, vf, port, info); 1780 } 1781 EXPORT_SYMBOL(ib_get_vf_config); 1782 1783 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 1784 struct ifla_vf_stats *stats) 1785 { 1786 if (!device->get_vf_stats) 1787 return -ENOSYS; 1788 1789 return device->get_vf_stats(device, vf, port, stats); 1790 } 1791 EXPORT_SYMBOL(ib_get_vf_stats); 1792 1793 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 1794 int type) 1795 { 1796 if (!device->set_vf_guid) 1797 return -ENOSYS; 1798 1799 return device->set_vf_guid(device, vf, port, guid, type); 1800 } 1801 EXPORT_SYMBOL(ib_set_vf_guid); 1802 1803 /** 1804 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list 1805 * and set it on the memory region. 1806 * @mr: memory region 1807 * @sg: dma mapped scatterlist 1808 * @sg_nents: number of entries in sg 1809 * @sg_offset: offset in bytes into sg 1810 * @page_size: page vector desired page size 1811 * 1812 * Constraints: 1813 * - The first sg element is allowed to have an offset. 1814 * - Each sg element must either be aligned to page_size or virtually 1815 * contiguous to the previous element. In case an sg element has a 1816 * non-contiguous offset, the mapping prefix will not include it. 1817 * - The last sg element is allowed to have length less than page_size. 1818 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size 1819 * then only max_num_sg entries will be mapped. 1820 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these 1821 * constraints holds and the page_size argument is ignored. 1822 * 1823 * Returns the number of sg elements that were mapped to the memory region. 1824 * 1825 * After this completes successfully, the memory region 1826 * is ready for registration. 1827 */ 1828 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 1829 unsigned int *sg_offset, unsigned int page_size) 1830 { 1831 if (unlikely(!mr->device->map_mr_sg)) 1832 return -ENOSYS; 1833 1834 mr->page_size = page_size; 1835 1836 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); 1837 } 1838 EXPORT_SYMBOL(ib_map_mr_sg); 1839 1840 /** 1841 * ib_sg_to_pages() - Convert the largest prefix of a sg list 1842 * to a page vector 1843 * @mr: memory region 1844 * @sgl: dma mapped scatterlist 1845 * @sg_nents: number of entries in sg 1846 * @sg_offset_p: IN: start offset in bytes into sg 1847 * OUT: offset in bytes for element n of the sg of the first 1848 * byte that has not been processed where n is the return 1849 * value of this function. 1850 * @set_page: driver page assignment function pointer 1851 * 1852 * Core service helper for drivers to convert the largest 1853 * prefix of the given sg list to a page vector. The sg list 1854 * prefix converted is the prefix that meets the requirements 1855 * of ib_map_mr_sg. 1856 * 1857 * Returns the number of sg elements that were assigned to 1858 * a page vector.
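*
* A typical set_page callback simply records each page address in the
* driver's own page array, e.g. (my_mr, to_my_mr(), pages, npages and
* max_pages are hypothetical driver names, shown only to illustrate the
* contract):
*
*	static int my_set_page(struct ib_mr *ibmr, u64 addr)
*	{
*		struct my_mr *mr = to_my_mr(ibmr);
*
*		if (unlikely(mr->npages == mr->max_pages))
*			return -ENOMEM;
*
*		mr->pages[mr->npages++] = addr;
*		return 0;
*	}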
1859 */ 1860 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 1861 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) 1862 { 1863 struct scatterlist *sg; 1864 u64 last_end_dma_addr = 0; 1865 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 1866 unsigned int last_page_off = 0; 1867 u64 page_mask = ~((u64)mr->page_size - 1); 1868 int i, ret; 1869 1870 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) 1871 return -EINVAL; 1872 1873 mr->iova = sg_dma_address(&sgl[0]) + sg_offset; 1874 mr->length = 0; 1875 1876 for_each_sg(sgl, sg, sg_nents, i) { 1877 u64 dma_addr = sg_dma_address(sg) + sg_offset; 1878 u64 prev_addr = dma_addr; 1879 unsigned int dma_len = sg_dma_len(sg) - sg_offset; 1880 u64 end_dma_addr = dma_addr + dma_len; 1881 u64 page_addr = dma_addr & page_mask; 1882 1883 /* 1884 * For the second and later elements, check whether either the 1885 * end of element i-1 or the start of element i is not aligned 1886 * on a page boundary. 1887 */ 1888 if (i && (last_page_off != 0 || page_addr != dma_addr)) { 1889 /* Stop mapping if there is a gap. */ 1890 if (last_end_dma_addr != dma_addr) 1891 break; 1892 1893 /* 1894 * Coalesce this element with the last. If it is small 1895 * enough just update mr->length. Otherwise start 1896 * mapping from the next page. 1897 */ 1898 goto next_page; 1899 } 1900 1901 do { 1902 ret = set_page(mr, page_addr); 1903 if (unlikely(ret < 0)) { 1904 sg_offset = prev_addr - sg_dma_address(sg); 1905 mr->length += prev_addr - dma_addr; 1906 if (sg_offset_p) 1907 *sg_offset_p = sg_offset; 1908 return i || sg_offset ? i : ret; 1909 } 1910 prev_addr = page_addr; 1911 next_page: 1912 page_addr += mr->page_size; 1913 } while (page_addr < end_dma_addr); 1914 1915 mr->length += dma_len; 1916 last_end_dma_addr = end_dma_addr; 1917 last_page_off = end_dma_addr & ~page_mask; 1918 1919 sg_offset = 0; 1920 } 1921 1922 if (sg_offset_p) 1923 *sg_offset_p = 0; 1924 return i; 1925 } 1926 EXPORT_SYMBOL(ib_sg_to_pages); 1927 1928 struct ib_drain_cqe { 1929 struct ib_cqe cqe; 1930 struct completion done; 1931 }; 1932 1933 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 1934 { 1935 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, 1936 cqe); 1937 1938 complete(&cqe->done); 1939 } 1940 1941 /* 1942 * Post a WR and block until its completion is reaped for the SQ. 1943 */ 1944 static void __ib_drain_sq(struct ib_qp *qp) 1945 { 1946 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1947 struct ib_drain_cqe sdrain; 1948 struct ib_send_wr swr = {}, *bad_swr; 1949 int ret; 1950 1951 if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) { 1952 WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT, 1953 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1954 return; 1955 } 1956 1957 swr.wr_cqe = &sdrain.cqe; 1958 sdrain.cqe.done = ib_drain_qp_done; 1959 init_completion(&sdrain.done); 1960 1961 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1962 if (ret) { 1963 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1964 return; 1965 } 1966 1967 ret = ib_post_send(qp, &swr, &bad_swr); 1968 if (ret) { 1969 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1970 return; 1971 } 1972 1973 wait_for_completion(&sdrain.done); 1974 } 1975 1976 /* 1977 * Post a WR and block until its completion is reaped for the RQ. 
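*
* This works because the QP is first moved to the error state, which flushes
* previously posted receive WRs in order; once the completion for the drain
* WR is reaped, every receive WR posted before it is known to have completed.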
1978 */ 1979 static void __ib_drain_rq(struct ib_qp *qp) 1980 { 1981 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1982 struct ib_drain_cqe rdrain; 1983 struct ib_recv_wr rwr = {}, *bad_rwr; 1984 int ret; 1985 1986 if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) { 1987 WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT, 1988 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1989 return; 1990 } 1991 1992 rwr.wr_cqe = &rdrain.cqe; 1993 rdrain.cqe.done = ib_drain_qp_done; 1994 init_completion(&rdrain.done); 1995 1996 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1997 if (ret) { 1998 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 1999 return; 2000 } 2001 2002 ret = ib_post_recv(qp, &rwr, &bad_rwr); 2003 if (ret) { 2004 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 2005 return; 2006 } 2007 2008 wait_for_completion(&rdrain.done); 2009 } 2010 2011 /** 2012 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the 2013 * application. 2014 * @qp: queue pair to drain 2015 * 2016 * If the device has a provider-specific drain function, then 2017 * call that. Otherwise call the generic drain function 2018 * __ib_drain_sq(). 2019 * 2020 * The caller must: 2021 * 2022 * ensure there is room in the CQ and SQ for the drain work request and 2023 * completion. 2024 * 2025 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2026 * IB_POLL_DIRECT. 2027 * 2028 * ensure that there are no other contexts that are posting WRs concurrently. 2029 * Otherwise the drain is not guaranteed. 2030 */ 2031 void ib_drain_sq(struct ib_qp *qp) 2032 { 2033 if (qp->device->drain_sq) 2034 qp->device->drain_sq(qp); 2035 else 2036 __ib_drain_sq(qp); 2037 } 2038 EXPORT_SYMBOL(ib_drain_sq); 2039 2040 /** 2041 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the 2042 * application. 2043 * @qp: queue pair to drain 2044 * 2045 * If the device has a provider-specific drain function, then 2046 * call that. Otherwise call the generic drain function 2047 * __ib_drain_rq(). 2048 * 2049 * The caller must: 2050 * 2051 * ensure there is room in the CQ and RQ for the drain work request and 2052 * completion. 2053 * 2054 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2055 * IB_POLL_DIRECT. 2056 * 2057 * ensure that there are no other contexts that are posting WRs concurrently. 2058 * Otherwise the drain is not guaranteed. 2059 */ 2060 void ib_drain_rq(struct ib_qp *qp) 2061 { 2062 if (qp->device->drain_rq) 2063 qp->device->drain_rq(qp); 2064 else 2065 __ib_drain_rq(qp); 2066 } 2067 EXPORT_SYMBOL(ib_drain_rq); 2068 2069 /** 2070 * ib_drain_qp() - Block until all CQEs have been consumed by the 2071 * application on both the RQ and SQ. 2072 * @qp: queue pair to drain 2073 * 2074 * The caller must: 2075 * 2076 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests 2077 * and completions. 2078 * 2079 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be 2080 * IB_POLL_DIRECT. 2081 * 2082 * ensure that there are no other contexts that are posting WRs concurrently. 2083 * Otherwise the drain is not guaranteed. 2084 */ 2085 void ib_drain_qp(struct ib_qp *qp) 2086 { 2087 ib_drain_sq(qp); 2088 if (!qp->srq) 2089 ib_drain_rq(qp); 2090 } 2091 EXPORT_SYMBOL(ib_drain_qp); 2092
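/*
 * A minimal sketch of how a consumer might use the drain helpers above when
 * tearing down a connection (qp and cq are illustrative names; the CQs must
 * have been allocated with ib_alloc_cq() in a non-IB_POLL_DIRECT context, as
 * documented for ib_drain_qp()):
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */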