/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
        [RDMA_CM_EVENT_ADDR_RESOLVED]    = "address resolved",
        [RDMA_CM_EVENT_ADDR_ERROR]       = "address error",
        [RDMA_CM_EVENT_ROUTE_RESOLVED]   = "route resolved ",
        [RDMA_CM_EVENT_ROUTE_ERROR]      = "route error",
        [RDMA_CM_EVENT_CONNECT_REQUEST]  = "connect request",
        [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
        [RDMA_CM_EVENT_CONNECT_ERROR]    = "connect error",
        [RDMA_CM_EVENT_UNREACHABLE]      = "unreachable",
        [RDMA_CM_EVENT_REJECTED]         = "rejected",
        [RDMA_CM_EVENT_ESTABLISHED]      = "established",
        [RDMA_CM_EVENT_DISCONNECTED]     = "disconnected",
        [RDMA_CM_EVENT_DEVICE_REMOVAL]   = "device removal",
        [RDMA_CM_EVENT_MULTICAST_JOIN]   = "multicast join",
        [RDMA_CM_EVENT_MULTICAST_ERROR]  = "multicast error",
        [RDMA_CM_EVENT_ADDR_CHANGE]      = "address change",
        [RDMA_CM_EVENT_TIMEWAIT_EXIT]    = "timewait exit",
};
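/*
 * Illustrative sketch (not part of this file): a ULP's rdma_cm event
 * handler could use rdma_event_msg() below to log events by name rather
 * than by number. The handler and its context are hypothetical.
 *
 *      static int my_cm_handler(struct rdma_cm_id *id,
 *                               struct rdma_cm_event *event)
 *      {
 *              pr_info("cma event %s, status %d\n",
 *                      rdma_event_msg(event->event), event->status);
 *              return 0;
 *      }
 */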
change", 92 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 93 }; 94 95 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 96 { 97 size_t index = event; 98 99 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 100 cma_events[index] : "unrecognized event"; 101 } 102 EXPORT_SYMBOL(rdma_event_msg); 103 104 static void cma_add_one(struct ib_device *device); 105 static void cma_remove_one(struct ib_device *device, void *client_data); 106 107 static struct ib_client cma_client = { 108 .name = "cma", 109 .add = cma_add_one, 110 .remove = cma_remove_one 111 }; 112 113 static struct ib_sa_client sa_client; 114 static struct rdma_addr_client addr_client; 115 static LIST_HEAD(dev_list); 116 static LIST_HEAD(listen_any_list); 117 static DEFINE_MUTEX(lock); 118 static struct workqueue_struct *cma_wq; 119 static int cma_pernet_id; 120 121 struct cma_pernet { 122 struct idr tcp_ps; 123 struct idr udp_ps; 124 struct idr ipoib_ps; 125 struct idr ib_ps; 126 }; 127 128 static struct cma_pernet *cma_pernet(struct net *net) 129 { 130 return net_generic(net, cma_pernet_id); 131 } 132 133 static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps) 134 { 135 struct cma_pernet *pernet = cma_pernet(net); 136 137 switch (ps) { 138 case RDMA_PS_TCP: 139 return &pernet->tcp_ps; 140 case RDMA_PS_UDP: 141 return &pernet->udp_ps; 142 case RDMA_PS_IPOIB: 143 return &pernet->ipoib_ps; 144 case RDMA_PS_IB: 145 return &pernet->ib_ps; 146 default: 147 return NULL; 148 } 149 } 150 151 struct cma_device { 152 struct list_head list; 153 struct ib_device *device; 154 struct completion comp; 155 atomic_t refcount; 156 struct list_head id_list; 157 enum ib_gid_type *default_gid_type; 158 }; 159 160 struct rdma_bind_list { 161 enum rdma_port_space ps; 162 struct hlist_head owners; 163 unsigned short port; 164 }; 165 166 struct class_port_info_context { 167 struct ib_class_port_info *class_port_info; 168 struct ib_device *device; 169 struct completion done; 170 struct ib_sa_query *sa_query; 171 u8 port_num; 172 }; 173 174 static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, 175 struct rdma_bind_list *bind_list, int snum) 176 { 177 struct idr *idr = cma_pernet_idr(net, ps); 178 179 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); 180 } 181 182 static struct rdma_bind_list *cma_ps_find(struct net *net, 183 enum rdma_port_space ps, int snum) 184 { 185 struct idr *idr = cma_pernet_idr(net, ps); 186 187 return idr_find(idr, snum); 188 } 189 190 static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum) 191 { 192 struct idr *idr = cma_pernet_idr(net, ps); 193 194 idr_remove(idr, snum); 195 } 196 197 enum { 198 CMA_OPTION_AFONLY, 199 }; 200 201 void cma_ref_dev(struct cma_device *cma_dev) 202 { 203 atomic_inc(&cma_dev->refcount); 204 } 205 206 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 207 void *cookie) 208 { 209 struct cma_device *cma_dev; 210 struct cma_device *found_cma_dev = NULL; 211 212 mutex_lock(&lock); 213 214 list_for_each_entry(cma_dev, &dev_list, list) 215 if (filter(cma_dev->device, cookie)) { 216 found_cma_dev = cma_dev; 217 break; 218 } 219 220 if (found_cma_dev) 221 cma_ref_dev(found_cma_dev); 222 mutex_unlock(&lock); 223 return found_cma_dev; 224 } 225 226 int cma_get_default_gid_type(struct cma_device *cma_dev, 227 unsigned int port) 228 { 229 if (port < rdma_start_port(cma_dev->device) || 230 port > rdma_end_port(cma_dev->device)) 231 return -EINVAL; 232 233 return cma_dev->default_gid_type[port 
int cma_get_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port)
{
        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        return cma_dev->default_gid_type[port -
                                         rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port,
                             enum ib_gid_type default_gid_type)
{
        unsigned long supported_gids;

        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

        if (!(supported_gids & 1 << default_gid_type))
                return -EINVAL;

        cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
                default_gid_type;

        return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
        return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id id;

        struct rdma_bind_list *bind_list;
        struct hlist_node node;
        struct list_head list; /* listen_any_list or cma_device.list */
        struct list_head listen_list; /* per device listens */
        struct cma_device *cma_dev;
        struct list_head mc_list;

        int internal_id;
        enum rdma_cm_state state;
        spinlock_t lock;
        struct mutex qp_mutex;

        struct completion comp;
        atomic_t refcount;
        struct mutex handler_mutex;

        int backlog;
        int timeout_ms;
        struct ib_sa_query *query;
        int query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32 seq_num;
        u32 qkey;
        u32 qp_num;
        pid_t owner;
        u32 options;
        u8 srq;
        u8 tos;
        u8 reuseaddr;
        u8 afonly;
        enum ib_gid_type gid_type;
};

struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head list;
        void *context;
        struct sockaddr_storage addr;
        struct kref mcref;
        bool igmp_joined;
        u8 join_state;
};

struct cma_work {
        struct work_struct work;
        struct rdma_id_private *id;
        enum rdma_cm_state old_state;
        enum rdma_cm_state new_state;
        struct rdma_cm_event event;
};

struct cma_ndev_work {
        struct work_struct work;
        struct rdma_id_private *id;
        struct rdma_cm_event event;
};

struct iboe_mcast_work {
        struct work_struct work;
        struct rdma_id_private *id;
        struct cma_multicast *mc;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version; /* IP version: 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
        struct ib_device *device;
        int port;
        union ib_gid local_gid;
        __be64 service_id;
        u16 pkey;
        bool has_gid:1;
};

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
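/*
 * cma_comp() above and cma_comp_exch()/cma_exch() below implement a small
 * compare-and-exchange protocol on id_priv->state under id_priv->lock.
 * A hedged usage sketch (states are illustrative):
 *
 *      if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 *              return -EINVAL; // another thread changed the state first
 */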
static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
                                   enum rdma_cm_state exch)
{
        unsigned long flags;
        enum rdma_cm_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
        struct in_device *in_dev = NULL;

        if (ndev) {
                rtnl_lock();
                in_dev = __in_dev_get_rtnl(ndev);
                if (in_dev) {
                        if (join)
                                ip_mc_inc_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                        else
                                ip_mc_dec_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                }
                rtnl_unlock();
        }
        return (in_dev) ? 0 : -ENODEV;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
                               struct cma_device *cma_dev)
{
        cma_ref_dev(cma_dev);
        id_priv->cma_dev = cma_dev;
        id_priv->gid_type = 0;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        _cma_attach_to_dev(id_priv, cma_dev);
        id_priv->gid_type =
                cma_dev->default_gid_type[id_priv->id.port_num -
                                          rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
        struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

        kfree(mc->multicast.ib);
        kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
        mutex_lock(&lock);
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
        mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
        return id_priv->id.route.addr.src_addr.ss_family;
}
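/*
 * QKey selection policy implemented by cma_set_qkey() below: an explicit
 * nonzero qkey is taken as-is (and must match any qkey already set);
 * otherwise RDMA_PS_UDP and RDMA_PS_IB default to RDMA_UDP_QKEY, while
 * RDMA_PS_IPOIB queries the multicast member record so the qkey matches
 * the attached IPoIB broadcast group.
 */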
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        if (id_priv->qkey) {
                if (qkey && id_priv->qkey != qkey)
                        return -EINVAL;
                return 0;
        }

        if (qkey) {
                id_priv->qkey = qkey;
                return 0;
        }

        switch (id_priv->id.ps) {
        case RDMA_PS_UDP:
        case RDMA_PS_IB:
                id_priv->qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(id_priv->id.device,
                                             id_priv->id.port_num, &rec.mgid,
                                             &rec);
                if (!ret)
                        id_priv->qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
        dev_addr->dev_type = ARPHRD_INFINIBAND;
        rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
        ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
        int ret;

        if (addr->sa_family != AF_IB) {
                ret = rdma_translate_ip(addr, dev_addr, NULL);
        } else {
                cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
                ret = 0;
        }

        return ret;
}

static inline int cma_validate_port(struct ib_device *device, u8 port,
                                    enum ib_gid_type gid_type,
                                    union ib_gid *gid, int dev_type,
                                    int bound_if_index)
{
        int ret = -ENODEV;
        struct net_device *ndev = NULL;

        if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
                return ret;

        if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
                return ret;

        if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
                ndev = dev_get_by_index(&init_net, bound_if_index);
                if (ndev && ndev->flags & IFF_LOOPBACK) {
                        pr_info("detected loopback device\n");
                        dev_put(ndev);

                        if (!device->get_netdev)
                                return -EOPNOTSUPP;

                        ndev = device->get_netdev(device, port);
                        if (!ndev)
                                return -ENODEV;
                }
        } else {
                gid_type = IB_GID_TYPE_IB;
        }

        ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                         ndev, NULL);

        if (ndev)
                dev_put(ndev);

        return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
                           struct rdma_id_private *listen_id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid, iboe_gid, *gidp;
        int ret = -ENODEV;
        u8 port;

        if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
            id_priv->id.ps == RDMA_PS_IPOIB)
                return -EINVAL;

        mutex_lock(&lock);
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                    &iboe_gid);

        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);

        if (listen_id_priv) {
                cma_dev = listen_id_priv->cma_dev;
                port = listen_id_priv->id.port_num;
                gidp = rdma_protocol_roce(cma_dev->device, port) ?
                       &iboe_gid : &gid;

                ret = cma_validate_port(cma_dev->device, port,
                                        rdma_protocol_ib(cma_dev->device, port) ?
                                        IB_GID_TYPE_IB :
                                        listen_id_priv->gid_type, gidp,
                                        dev_addr->dev_type,
                                        dev_addr->bound_dev_if);
                if (!ret) {
                        id_priv->id.port_num = port;
                        goto out;
                }
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (listen_id_priv &&
                            listen_id_priv->cma_dev == cma_dev &&
                            listen_id_priv->id.port_num == port)
                                continue;

                        gidp = rdma_protocol_roce(cma_dev->device, port) ?
                               &iboe_gid : &gid;

                        ret = cma_validate_port(cma_dev->device, port,
                                                rdma_protocol_ib(cma_dev->device, port) ?
                                                IB_GID_TYPE_IB :
                                                cma_dev->default_gid_type[port - 1],
                                                gidp, dev_addr->dev_type,
                                                dev_addr->bound_dev_if);
                        if (!ret) {
                                id_priv->id.port_num = port;
                                goto out;
                        }
                }
        }

out:
        if (!ret)
                cma_attach_to_dev(id_priv, cma_dev);

        mutex_unlock(&lock);
        return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev, *cur_dev;
        struct sockaddr_ib *addr;
        union ib_gid gid, sgid, *dgid;
        u16 pkey, index;
        u8 p;
        int i;

        cma_dev = NULL;
        addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);

        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
                                continue;

                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;

                        for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
                                                       &gid, NULL);
                             i++) {
                                if (!memcmp(&gid, dgid, sizeof(gid))) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                        goto found;
                                }

                                if (!cma_dev && (gid.global.subnet_prefix ==
                                                 dgid->global.subnet_prefix)) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                }
                        }
                }
        }

        if (!cma_dev)
                return -ENODEV;

found:
        cma_attach_to_dev(id_priv, cma_dev);
        addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
        memcpy(&addr->sib_addr, &sgid, sizeof sgid);
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
                                  enum ib_qp_type qp_type)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->owner = task_pid_nr(current);
        id_priv->state = RDMA_CM_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        id_priv->id.qp_type = qp_type;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
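/*
 * Illustrative sketch (not part of this file): typical ULP creation of an
 * id in the TCP port space for a reliable-connected QP. The handler and
 * context names are hypothetical.
 *
 *      struct rdma_cm_id *id;
 *
 *      id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *                          RDMA_PS_TCP, IB_QPT_RC);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 */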
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp_init_attr->port_num = id->port_num;
        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        mutex_lock(&id_priv->qp_mutex);
        ib_destroy_qp(id_priv->id.qp);
        id_priv->id.qp = NULL;
        mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
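/*
 * Note on the helpers above: cma_init_ud_qp() walks a UD QP through the
 * full INIT -> RTR -> RTS sequence immediately, since datagram QPs need no
 * peer handshake, while cma_init_conn_qp() stops at INIT. Connected QPs
 * advance to RTR/RTS later, via cma_modify_qp_rtr()/cma_modify_qp_rts()
 * below, once the connection parameters are known.
 */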
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        union ib_gid sgid;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                goto out;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
                           qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
        if (ret)
                goto out;

        BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

        if (conn_param)
                qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
        struct ib_qp_attr qp_attr;
        int ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;
        u16 pkey;

        if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
                pkey = 0xffff;
        else
                pkey = ib_addr_get_pkey(dev_addr);

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (id_priv->id.qp_type == IB_QPT_UD) {
                ret = cma_set_qkey(id_priv, 0);
                if (ret)
                        return ret;

                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}
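/*
 * cma_ib_init_qp_attr() above uses the default full-membership P_Key
 * (0xffff) on ports with Ethernet-based addressing (RoCE), where the IB
 * P_Key space is not partitioned the way it is on InfiniBand; on IB ports
 * the P_Key comes from the resolved device address instead.
 */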
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);

                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
        } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
        } else
                ret = -ENOSYS;

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
        if (src->sa_family != dst->sa_family)
                return -1;

        switch (src->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
                       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
        case AF_INET6:
                return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
                                     &((struct sockaddr_in6 *) dst)->sin6_addr);
        default:
                return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
                                   &((struct sockaddr_ib *) dst)->sib_addr);
        }
}

static __be16 cma_port(struct sockaddr *addr)
{
        struct sockaddr_ib *sib;

        switch (addr->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) addr)->sin_port;
        case AF_INET6:
                return ((struct sockaddr_in6 *) addr)->sin6_port;
        case AF_IB:
                sib = (struct sockaddr_ib *) addr;
                return htons((u16) (be64_to_cpu(sib->sib_sid) &
                                    be64_to_cpu(sib->sib_sid_mask)));
        default:
                return 0;
        }
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_sa_path_rec *path)
{
        struct sockaddr_ib *listen_ib, *ib;

        listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
        if (src_addr) {
                ib = (struct sockaddr_ib *)src_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
                        ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
                        ib->sib_flowinfo = listen_ib->sib_flowinfo;
                        ib->sib_addr = listen_ib->sib_addr;
                        ib->sib_sid = listen_ib->sib_sid;
                        ib->sib_scope_id = listen_ib->sib_scope_id;
                }
                ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
        }
        if (dst_addr) {
                ib = (struct sockaddr_ib *)dst_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->dgid, 16);
                }
        }
}
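/*
 * Worked example for cma_port() above in the AF_IB case: the port is the
 * low 16 bits of the service ID that survive sib_sid_mask. For instance,
 * a sib_sid whose low 16 bits are 0x12b7, with a mask whose low 16 bits
 * are all set, yields port 0x12b7 (4791).
 */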
static void cma_save_ip4_info(struct sockaddr *src_addr,
                              struct sockaddr *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        struct sockaddr_in *ip4;

        if (src_addr) {
                ip4 = (struct sockaddr_in *)src_addr;
                ip4->sin_family = AF_INET;
                ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
                ip4->sin_port = local_port;
        }

        if (dst_addr) {
                ip4 = (struct sockaddr_in *)dst_addr;
                ip4->sin_family = AF_INET;
                ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
                ip4->sin_port = hdr->port;
        }
}

static void cma_save_ip6_info(struct sockaddr *src_addr,
                              struct sockaddr *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        struct sockaddr_in6 *ip6;

        if (src_addr) {
                ip6 = (struct sockaddr_in6 *)src_addr;
                ip6->sin6_family = AF_INET6;
                ip6->sin6_addr = hdr->dst_addr.ip6;
                ip6->sin6_port = local_port;
        }

        if (dst_addr) {
                ip6 = (struct sockaddr_in6 *)dst_addr;
                ip6->sin6_family = AF_INET6;
                ip6->sin6_addr = hdr->src_addr.ip6;
                ip6->sin6_port = hdr->port;
        }
}

static u16 cma_port_from_service_id(__be64 service_id)
{
        return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
                            struct sockaddr *dst_addr,
                            struct ib_cm_event *ib_event,
                            __be64 service_id)
{
        struct cma_hdr *hdr;
        __be16 port;

        hdr = ib_event->private_data;
        if (hdr->cma_version != CMA_VERSION)
                return -EINVAL;

        port = htons(cma_port_from_service_id(service_id));

        switch (cma_get_ip_ver(hdr)) {
        case 4:
                cma_save_ip4_info(src_addr, dst_addr, hdr, port);
                break;
        case 6:
                cma_save_ip6_info(src_addr, dst_addr, hdr, port);
                break;
        default:
                return -EAFNOSUPPORT;
        }

        return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_cm_event *ib_event,
                             sa_family_t sa_family, __be64 service_id)
{
        if (sa_family == AF_IB) {
                if (ib_event->event == IB_CM_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id,
                                         ib_event->param.req_rcvd.primary_path);
                else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
                return 0;
        }

        return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
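/*
 * Worked example for the service-ID helpers: rdma_get_service_id() (later
 * in this file) packs ((u64)ps << 16) + port, so for RDMA_PS_TCP (0x0106)
 * and port 4791 (0x12b7) the service ID is 0x00000000010612b7.
 * rdma_ps_from_service_id() below and cma_port_from_service_id() above
 * recover 0x0106 and 4791 from it, respectively.
 */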
static int cma_save_req_info(const struct ib_cm_event *ib_event,
                             struct cma_req_info *req)
{
        const struct ib_cm_req_event_param *req_param =
                &ib_event->param.req_rcvd;
        const struct ib_cm_sidr_req_event_param *sidr_param =
                &ib_event->param.sidr_req_rcvd;

        switch (ib_event->event) {
        case IB_CM_REQ_RECEIVED:
                req->device = req_param->listen_id->device;
                req->port = req_param->port;
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid = true;
                req->service_id = req_param->primary_path->service_id;
                req->pkey = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            req_param->bth_pkey, req->pkey);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                req->device = sidr_param->listen_id->device;
                req->port = sidr_param->port;
                req->has_gid = false;
                req->service_id = sidr_param->service_id;
                req->pkey = sidr_param->pkey;
                if (req->pkey != sidr_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            sidr_param->bth_pkey, req->pkey);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in *dst_addr,
                                  const struct sockaddr_in *src_addr)
{
        __be32 daddr = dst_addr->sin_addr.s_addr,
               saddr = src_addr->sin_addr.s_addr;
        struct fib_result res;
        struct flowi4 fl4;
        int err;
        bool ret;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
            ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
            ipv4_is_loopback(saddr))
                return false;

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_iif = net_dev->ifindex;
        fl4.daddr = daddr;
        fl4.saddr = saddr;

        rcu_read_lock();
        err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
        ret = err == 0 && FIB_RES_DEV(res) == net_dev;
        rcu_read_unlock();

        return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in6 *dst_addr,
                                  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
        const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
                           IPV6_ADDR_LINKLOCAL;
        struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
                                         &src_addr->sin6_addr, net_dev->ifindex,
                                         strict);
        bool ret;

        if (!rt)
                return false;

        ret = rt->rt6i_idev->dev == net_dev;
        ip6_rt_put(rt);

        return ret;
#else
        return false;
#endif
}

static bool validate_net_dev(struct net_device *net_dev,
                             const struct sockaddr *daddr,
                             const struct sockaddr *saddr)
{
        const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
        const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
        const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
        const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

        switch (daddr->sa_family) {
        case AF_INET:
                return saddr->sa_family == AF_INET &&
                       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

        case AF_INET6:
                return saddr->sa_family == AF_INET6 &&
                       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

        default:
                return false;
        }
}
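/*
 * The IPv4 branch above validates a candidate netdevice by doing a FIB
 * lookup for the connection's addresses with flowi4_iif set to that
 * device, accepting it only when the resulting route resolves to the same
 * device; obviously invalid addresses (multicast, broadcast, zeronet,
 * loopback) are rejected up front.
 */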
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
                                          const struct cma_req_info *req)
{
        struct sockaddr_storage listen_addr_storage, src_addr_storage;
        struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
                        *src_addr = (struct sockaddr *)&src_addr_storage;
        struct net_device *net_dev;
        const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
        int err;

        err = cma_save_ip_info(listen_addr, src_addr, ib_event,
                               req->service_id);
        if (err)
                return ERR_PTR(err);

        net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
                                           gid, listen_addr);
        if (!net_dev)
                return ERR_PTR(-ENODEV);

        if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
                dev_put(net_dev);
                return ERR_PTR(-EHOSTUNREACH);
        }

        return net_dev;
}

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
        return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
                                   const struct cma_hdr *hdr)
{
        struct sockaddr *addr = cma_src_addr(id_priv);
        __be32 ip4_addr;
        struct in6_addr ip6_addr;

        if (cma_any_addr(addr) && !id_priv->afonly)
                return true;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
                if (cma_get_ip_ver(hdr) != 4)
                        return false;
                if (!cma_any_addr(addr) &&
                    hdr->dst_addr.ip4.addr != ip4_addr)
                        return false;
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
                if (cma_get_ip_ver(hdr) != 6)
                        return false;
                if (!cma_any_addr(addr) &&
                    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
                        return false;
                break;
        case AF_IB:
                return true;
        default:
                return false;
        }

        return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
        enum rdma_transport_type transport =
                rdma_node_get_transport(device->node_type);

        return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
        struct ib_device *device = id->device;
        const int port_num = id->port_num ?: rdma_start_port(device);

        return cma_protocol_roce_dev_port(device, port_num);
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
                              const struct net_device *net_dev,
                              u8 port_num)
{
        const struct rdma_addr *addr = &id->route.addr;

        if (!net_dev)
                /* This request is an AF_IB request or a RoCE request */
                return (!id->port_num || id->port_num == port_num) &&
                       (addr->src_addr.ss_family == AF_IB ||
                        cma_protocol_roce_dev_port(id->device, port_num));

        return !addr->dev_addr.bound_dev_if ||
               (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
                addr->dev_addr.bound_dev_if == net_dev->ifindex);
}
static struct rdma_id_private *cma_find_listener(
                const struct rdma_bind_list *bind_list,
                const struct ib_cm_id *cm_id,
                const struct ib_cm_event *ib_event,
                const struct cma_req_info *req,
                const struct net_device *net_dev)
{
        struct rdma_id_private *id_priv, *id_priv_dev;

        if (!bind_list)
                return ERR_PTR(-EINVAL);

        hlist_for_each_entry(id_priv, &bind_list->owners, node) {
                if (cma_match_private_data(id_priv, ib_event->private_data)) {
                        if (id_priv->id.device == cm_id->device &&
                            cma_match_net_dev(&id_priv->id, net_dev, req->port))
                                return id_priv;
                        list_for_each_entry(id_priv_dev,
                                            &id_priv->listen_list,
                                            listen_list) {
                                if (id_priv_dev->id.device == cm_id->device &&
                                    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
                                        return id_priv_dev;
                        }
                }
        }

        return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                                                 struct ib_cm_event *ib_event,
                                                 struct net_device **net_dev)
{
        struct cma_req_info req;
        struct rdma_bind_list *bind_list;
        struct rdma_id_private *id_priv;
        int err;

        err = cma_save_req_info(ib_event, &req);
        if (err)
                return ERR_PTR(err);

        *net_dev = cma_get_net_dev(ib_event, &req);
        if (IS_ERR(*net_dev)) {
                if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
                        /* Assuming the protocol is AF_IB */
                        *net_dev = NULL;
                } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
                        /* TODO find the net dev matching the request parameters
                         * through the RoCE GID table */
                        *net_dev = NULL;
                } else {
                        return ERR_CAST(*net_dev);
                }
        }

        bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
                                rdma_ps_from_service_id(req.service_id),
                                cma_port_from_service_id(req.service_id));
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
        }

        return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
        return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
        }
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                /* sync with device removal to avoid duplicate destruction */
                list_del_init(&dev_id_priv->list);
                list_del(&dev_id_priv->listen_list);
                mutex_unlock(&lock);

                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum rdma_cm_state state)
{
        switch (state) {
        case RDMA_CM_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case RDMA_CM_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case RDMA_CM_LISTEN:
                if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;
        struct net *net = id_priv->id.route.addr.dev_addr.net;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                cma_ps_remove(net, bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
                                      id_priv->id.port_num)) {
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
                } else {
                        if (mc->igmp_joined) {
                                struct rdma_dev_addr *dev_addr =
                                        &id_priv->id.route.addr.dev_addr;
                                struct net_device *ndev = NULL;

                                if (dev_addr->bound_dev_if)
                                        ndev = dev_get_by_index(&init_net,
                                                                dev_addr->bound_dev_if);
                                if (ndev) {
                                        cma_igmp_send(ndev,
                                                      &mc->multicast.ib->rec.mgid,
                                                      false);
                                        dev_put(ndev);
                                }
                        }
                        kref_put(&mc->mcref, release_mc);
                }
        }
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum rdma_cm_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, RDMA_CM_DESTROYING);
        cma_cancel_operation(id_priv, state);

        /*
         * Wait for any active callback to finish. New callbacks will find
         * the id_priv state set to destroying and abort.
         */
        mutex_lock(&id_priv->handler_mutex);
        mutex_unlock(&id_priv->handler_mutex);

        if (id_priv->cma_dev) {
                if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.iw)
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                }
                cma_leave_mc_groups(id_priv);
                cma_release_dev(id_priv);
        }

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        if (id_priv->internal_id)
                cma_deref_id(id_priv->id.context);

        kfree(id_priv->id.route.path_rec);
        put_net(id_priv->id.route.addr.dev_addr.net);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(id_priv, NULL);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(id_priv, NULL);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}
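/*
 * cma_ib_handler() below translates IB CM events into RDMA CM events for
 * the active side of a connection: IB_CM_REQ_ERROR/IB_CM_REP_ERROR become
 * RDMA_CM_EVENT_UNREACHABLE with -ETIMEDOUT, and a received REJ becomes
 * RDMA_CM_EVENT_REJECTED carrying the remote reject reason in event.status.
 */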
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_DISCONNECT))
                goto out;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                if (id_priv->id.qp) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else {
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                }
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
                                   RDMA_CM_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
                event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
                break;
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                pr_err("RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event,
                                               struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        const __be64 service_id =
                ib_event->param.req_rcvd.primary_path->service_id;
        int ret;

        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
                            listen_id->event_handler, listen_id->context,
                            listen_id->ps, ib_event->param.req_rcvd.qp_type);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family, service_id))
                goto err;

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto err;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        if (net_dev) {
                ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_protocol_roce(listen_id) &&
                    cma_any_addr(cma_src_addr(id_priv))) {
                        rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
                        rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
                        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
                } else if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;

err:
        rdma_destroy_id(id);
        return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event,
                                              struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct net *net = listen_id->route.addr.dev_addr.net;
        int ret;

        id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
                            listen_id->ps, IB_QPT_UD);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family,
                              ib_event->param.sidr_req_rcvd.service_id))
                goto err;

        if (net_dev) {
                ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv),
                                                 &id->route.addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
        return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
                (!id->qp_type));
}
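/*
 * Illustrative sketch (not part of this file): throughout this file, a
 * ULP handler returning nonzero asks the CMA to destroy the id. A
 * listener that wants to refuse an incoming connect request could do,
 * with hypothetical names:
 *
 *      static int my_cm_handler(struct rdma_cm_id *id,
 *                               struct rdma_cm_event *event)
 *      {
 *              if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST &&
 *                  !my_accepting)
 *                      return -ECONNREFUSED; // cma destroys the new id
 *              return 0;
 *      }
 */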
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;

        listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
        if (IS_ERR(listen_id))
                return PTR_ERR(listen_id);

        if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
                ret = -EINVAL;
                goto net_dev_put;
        }

        mutex_lock(&listen_id->handler_mutex);
        if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
                goto err1;
        }

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto err1;
        }

        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        ret = cma_acquire_dev(conn_id, listen_id);
        if (ret)
                goto err2;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret)
                goto err3;
        /*
         * Acquire mutex to prevent user executing rdma_destroy_id()
         * while we're accessing the cm_id.
         */
        mutex_lock(&lock);
        if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
            (conn_id->id.qp_type != IB_QPT_UD))
                ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
        mutex_unlock(&lock);
        mutex_unlock(&conn_id->handler_mutex);
        mutex_unlock(&listen_id->handler_mutex);
        cma_deref_id(conn_id);
        if (net_dev)
                dev_put(net_dev);
        return 0;

err3:
        cma_deref_id(conn_id);
        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;
err2:
        cma_exch(conn_id, RDMA_CM_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
err1:
        mutex_unlock(&listen_id->handler_mutex);
        if (conn_id)
                rdma_destroy_id(&conn_id->id);

net_dev_put:
        if (net_dev)
                dev_put(net_dev);

        return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
        if (addr->sa_family == AF_IB)
                return ((struct sockaddr_ib *) addr)->sib_sid;

        return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
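/*
 * cma_iw_handler() below performs the equivalent translation for iWARP:
 * an IW_CM_EVENT_CONNECT_REPLY with status 0 becomes
 * RDMA_CM_EVENT_ESTABLISHED, -ECONNRESET/-ECONNREFUSED become
 * RDMA_CM_EVENT_REJECTED, and -ETIMEDOUT becomes
 * RDMA_CM_EVENT_UNREACHABLE.
 */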
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id->tos = id_priv->tos;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
			ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
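/*
 * Illustrative sketch (not part of this file): a passive-side user of the
 * RDMA CM typically creates an id, binds a local address and listens; the
 * handler, port and backlog below are hypothetical.
 */
#if 0
static int example_listen(struct net *net, rdma_cm_event_handler handler)
{
	struct rdma_cm_id *id;
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port   = htons(7471),	/* arbitrary example port */
	};

	id = rdma_create_id(net, handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	if (rdma_bind_addr(id, (struct sockaddr *)&sin) ||
	    rdma_listen(id, 16)) {
		rdma_destroy_id(id);
		return -EINVAL;
	}
	return 0;
}
#endif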
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
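/*
 * Added note: SA path queries complete asynchronously.  cma_query_handler()
 * runs in SA query context and only records the result; the state
 * transition and the RDMA_CM_EVENT_ROUTE_RESOLVED/ROUTE_ERROR upcall happen
 * later in cma_work_handler() on cma_wq, so the user callback never runs in
 * the SA completion path itself.
 */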
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
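/*
 * Added note (illustrative): on RoCE the IP TOS value is first mapped to a
 * socket priority via rt_tos2priority() (the DSCP bits select an sk_prio),
 * then to a traffic class or VLAN egress priority.  For example, a TOS of
 * 0x10 ("low delay") maps to TC_PRIO_INTERACTIVE, which the mapping below
 * may translate into a non-default service level.
 */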
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
		vlan_dev_real_dev(ndev) : ndev;

	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}

static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if) {
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
		if (!ndev) {
			ret = -ENODEV;
			goto err2;
		}

		if (ndev->flags & IFF_LOOPBACK) {
			dev_put(ndev);
			if (!id_priv->id.device->get_netdev) {
				ret = -EOPNOTSUPP;
				goto err2;
			}

			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
							      id_priv->id.port_num);
			if (!ndev) {
				ret = -ENODEV;
				goto err2;
			}
		}

		route->path_rec->net = &init_net;
		route->path_rec->ifindex = ndev->ifindex;
		route->path_rec->gid_type = id_priv->gid_type;
	}
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	/* Use the hint from IP Stack to select GID Type */
	if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
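/*
 * Added descriptive comment: rdma_resolve_route() below kicks off route
 * resolution for an id whose address has already been resolved.  The
 * fabric-specific helpers above are chosen by transport: IB issues an SA
 * path-record query, RoCE synthesizes the path record from the netdev, and
 * iWARP needs no fabric route at all, so it merely schedules the event.
 */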
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}
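/*
 * Illustrative sketch (not part of this file): an active-side caller
 * resolves the destination address first and then, from its event handler,
 * resolves the route.  The handler name and 2000 ms timeout are made up.
 */
#if 0
static int example_on_addr_resolved(struct rdma_cm_id *id)
{
	/* Runs on RDMA_CM_EVENT_ADDR_RESOLVED; success queues an
	 * RDMA_CM_EVENT_ROUTE_RESOLVED or _ERROR event asynchronously.
	 */
	return rdma_resolve_route(id, 2000);
}
#endif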
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
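/*
 * Illustrative sketch (not part of this file): both knobs above are state
 * dependent — rdma_set_reuseaddr() may enable reuse in any state but may
 * only clear it while the id is still idle, and rdma_set_afonly() requires
 * the id to be idle or merely bound.  The helper below is hypothetical.
 */
#if 0
static int example_bind_reuse(struct rdma_cm_id *id, struct sockaddr *addr)
{
	int ret;

	ret = rdma_set_reuseaddr(id, 1);
	if (ret)
		return ret;
	return rdma_bind_addr(id, addr);
}
#endif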
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !cma_ps_find(net, ps, (unsigned short)rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember the previously used port number in order to avoid
		 * reusing the same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
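/*
 * Worked example (illustrative): with a typical local port range of
 * 32768-60999 the loop above starts from a random rover, e.g. 45000, and
 * walks the range at most once, wrapping from 60999 back to 32768, until
 * it finds a port with no bind_list in this port space or every candidate
 * has been tried, in which case it returns -EADDRNOTAVAIL.
 */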
/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}

static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}

static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
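/*
 * Added descriptive comment: cma_format_hdr() below fills the private-data
 * header that precedes user data for the IPv4/IPv6 port spaces: a CMA
 * version byte, an IP version nibble, the source port and the source and
 * destination addresses.  AF_IB users bypass this header entirely, which is
 * why cma_user_data_offset() is zero for them.
 */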
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id	*id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
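/*
 * Added note: with CMA_CM_RESPONSE_TIMEOUT == 20 the SIDR request timeout
 * above works out to 1 << (20 - 8) = 4096 ms, approximating the IB CM's
 * 4.096 usec * 2^timeout response-timeout encoding in whole milliseconds.
 */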
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
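/*
 * Illustrative sketch (not part of this file): a minimal active-side
 * connect after RDMA_CM_EVENT_ROUTE_RESOLVED; all parameter values are
 * made-up examples.
 */
#if 0
static int example_connect(struct rdma_cm_id *id)
{
	struct rdma_conn_param param = {
		.responder_resources = 1,
		.initiator_depth     = 1,
		.retry_count	     = 7,
		.rnr_retry_count     = 7,	/* clamped to 7 above anyway */
	};

	return rdma_connect(id, &param);
}
#endif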
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
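/*
 * Illustrative sketch (not part of this file): a listener typically calls
 * rdma_accept() from its RDMA_CM_EVENT_CONNECT_REQUEST handler, echoing the
 * initiator's depth/resources from the event; the handler is hypothetical.
 */
#if 0
static int example_on_connect_request(struct rdma_cm_id *id,
				      struct rdma_cm_event *event)
{
	struct rdma_conn_param param = event->param.conn;

	/* A real handler would normally create/associate a QP first. */
	return rdma_accept(id, &param);
}
#endif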
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret = 0;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 ndev, gid_type,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
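/*
 * Added note (illustrative): for plain IP multicast groups the MGID below
 * is derived the same way IPoIB maps them — the group address is folded
 * into the port's broadcast GID via ip_ib_mc_map()/ipv6_ib_mc_map() — and
 * for the RDMA_PS_UDP port space one byte of the mapped GID is overwritten
 * with the 0x01 RDMA CM signature.  IPv6 addresses matching the
 * SA-assigned 0xff1x...a01b pattern, and AF_IB addresses, are used as the
 * MGID verbatim.
 */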
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static void cma_query_sa_classport_info_cb(int status,
					   struct ib_class_port_info *rec,
					   void *context)
{
	struct class_port_info_context *cb_ctx = context;

	WARN_ON(!context);

	if (status || !rec) {
		pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
			 cb_ctx->device->name, cb_ctx->port_num, status);
		goto out;
	}

	memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));

out:
	complete(&cb_ctx->done);
}

static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
				       struct ib_class_port_info *class_port_info)
{
	struct class_port_info_context *cb_ctx;
	int ret;

	cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
	if (!cb_ctx)
		return -ENOMEM;

	cb_ctx->device = device;
	cb_ctx->class_port_info = class_port_info;
	cb_ctx->port_num = port_num;
	init_completion(&cb_ctx->done);

	ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
					     CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
					     GFP_KERNEL, cma_query_sa_classport_info_cb,
					     cb_ctx, &cb_ctx->sa_query);
	if (ret < 0) {
		pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
		       device->name, port_num, ret);
		goto out;
	}

	wait_for_completion(&cb_ctx->done);

out:
	kfree(cb_ctx);
	return ret;
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct ib_class_port_info class_port_info;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
		ret = cma_query_sa_classport_info(id_priv->id.device,
						  id_priv->id.port_num,
						  &class_port_info);

		if (ret)
			return ret;

		if (!(ib_get_cpi_capmask2(&class_port_info) &
		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
			pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
				"RDMA CM: SM doesn't support Send Only Full Member option\n",
				id_priv->id.device->name, id_priv->id.port_num);
			return -EOPNOTSUPP;
		}
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
	} else {
		ret = -ENOSYS;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
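/*
 * A minimal sketch of how a consumer might drive the two exported calls
 * above (hypothetical caller code, not part of this file):
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&maddr,
 *				  BIT(FULLMEMBER_JOIN), my_ctx);
 *	if (!ret) {
 *		... wait for RDMA_CM_EVENT_MULTICAST_JOIN on id ...
 *		rdma_leave_multicast(id, (struct sockaddr *)&maddr);
 *	}
 *
 * Bonding failover support follows: when the active slave of a bond
 * changes, ids bound to the bond device may hold a stale source hardware
 * address, so cma_netdev_change() tells them to re-resolve by reporting
 * RDMA_CM_EVENT_ADDR_CHANGE.
 */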
static int cma_netdev_change(struct net_device *ndev,
			     struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
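/*
 * Device removal: each id bound to the departing device is moved to
 * RDMA_CM_DEVICE_REMOVAL, any pending operation is cancelled, and the
 * owner is notified; a non-zero return from the event handler tells
 * cma_process_remove() to destroy the id on the owner's behalf.
 */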
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
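/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS.  A dump may be
 * resumed across several invocations when the skb fills up; cb->args[0]
 * and cb->args[1] act as device and id cursors so the walk restarts
 * where the previous batch of messages left off.
 */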
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	/* The pernet subsys was registered above; undo it on failure too. */
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}
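/*
 * Module unload tears the pieces down in the reverse order of cma_init():
 * consumers are unhooked first (configfs, netlink, IB client), then the
 * helper clients, the pernet state and finally the workqueue go away.
 */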
static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);