1 /* 2 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. 5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/completion.h> 37 #include <linux/in.h> 38 #include <linux/in6.h> 39 #include <linux/mutex.h> 40 #include <linux/random.h> 41 #include <linux/igmp.h> 42 #include <linux/idr.h> 43 #include <linux/inetdevice.h> 44 #include <linux/slab.h> 45 #include <linux/module.h> 46 #include <net/route.h> 47 48 #include <net/net_namespace.h> 49 #include <net/netns/generic.h> 50 #include <net/tcp.h> 51 #include <net/ipv6.h> 52 #include <net/ip_fib.h> 53 #include <net/ip6_route.h> 54 55 #include <rdma/rdma_cm.h> 56 #include <rdma/rdma_cm_ib.h> 57 #include <rdma/rdma_netlink.h> 58 #include <rdma/ib.h> 59 #include <rdma/ib_cache.h> 60 #include <rdma/ib_cm.h> 61 #include <rdma/ib_sa.h> 62 #include <rdma/iw_cm.h> 63 64 #include "core_priv.h" 65 66 MODULE_AUTHOR("Sean Hefty"); 67 MODULE_DESCRIPTION("Generic RDMA CM Agent"); 68 MODULE_LICENSE("Dual BSD/GPL"); 69 70 #define CMA_CM_RESPONSE_TIMEOUT 20 71 #define CMA_MAX_CM_RETRIES 15 72 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 73 #define CMA_IBOE_PACKET_LIFETIME 18 74 75 static const char * const cma_events[] = { 76 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", 77 [RDMA_CM_EVENT_ADDR_ERROR] = "address error", 78 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", 79 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", 80 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", 81 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", 82 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", 83 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", 84 [RDMA_CM_EVENT_REJECTED] = "rejected", 85 [RDMA_CM_EVENT_ESTABLISHED] = "established", 86 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", 87 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", 88 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", 89 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", 90 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", 91 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 92 }; 93 94 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 95 { 96 size_t index = event; 97 98 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 99 cma_events[index] : "unrecognized event"; 100 } 101 EXPORT_SYMBOL(rdma_event_msg); 102 103 static void cma_add_one(struct ib_device *device); 104 static void cma_remove_one(struct ib_device *device, void *client_data); 105 106 static struct ib_client cma_client = { 107 .name = "cma", 108 .add = cma_add_one, 109 .remove = cma_remove_one 110 }; 111 112 static struct ib_sa_client sa_client; 113 static struct rdma_addr_client addr_client; 114 static LIST_HEAD(dev_list); 115 static LIST_HEAD(listen_any_list); 116 static DEFINE_MUTEX(lock); 117 static struct workqueue_struct *cma_wq; 118 static int cma_pernet_id; 119 120 struct cma_pernet { 121 struct idr tcp_ps; 122 struct idr udp_ps; 123 struct idr ipoib_ps; 124 struct idr ib_ps; 125 }; 126 127 static struct cma_pernet *cma_pernet(struct net *net) 128 { 129 return net_generic(net, cma_pernet_id); 130 } 131 132 static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps) 133 { 134 struct cma_pernet *pernet = cma_pernet(net); 135 136 switch (ps) { 137 case RDMA_PS_TCP: 138 return &pernet->tcp_ps; 139 case RDMA_PS_UDP: 140 return &pernet->udp_ps; 141 case RDMA_PS_IPOIB: 142 return &pernet->ipoib_ps; 143 case RDMA_PS_IB: 144 return &pernet->ib_ps; 145 default: 146 return NULL; 147 } 148 } 149 150 struct cma_device { 151 struct list_head list; 152 struct ib_device *device; 153 struct completion comp; 154 atomic_t refcount; 155 struct list_head id_list; 156 enum ib_gid_type *default_gid_type; 157 }; 158 159 struct rdma_bind_list { 160 enum rdma_port_space ps; 161 struct hlist_head owners; 162 unsigned short port; 163 }; 164 165 static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, 166 struct rdma_bind_list *bind_list, int snum) 167 { 168 struct idr *idr = cma_pernet_idr(net, ps); 169 170 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); 171 } 172 173 static struct rdma_bind_list *cma_ps_find(struct net *net, 174 enum rdma_port_space ps, int snum) 175 { 176 struct idr *idr = cma_pernet_idr(net, ps); 177 178 return idr_find(idr, snum); 179 } 180 181 static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum) 182 { 183 struct idr *idr = cma_pernet_idr(net, ps); 184 185 idr_remove(idr, snum); 186 } 187 188 enum { 189 CMA_OPTION_AFONLY, 190 }; 191 192 void cma_ref_dev(struct cma_device *cma_dev) 193 { 194 atomic_inc(&cma_dev->refcount); 195 } 196 197 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 198 void *cookie) 199 { 200 struct cma_device *cma_dev; 201 struct cma_device *found_cma_dev = NULL; 202 203 mutex_lock(&lock); 204 205 list_for_each_entry(cma_dev, &dev_list, list) 206 if (filter(cma_dev->device, cookie)) { 207 found_cma_dev = cma_dev; 208 break; 209 } 210 211 if (found_cma_dev) 212 cma_ref_dev(found_cma_dev); 213 mutex_unlock(&lock); 214 return found_cma_dev; 215 } 216 217 int cma_get_default_gid_type(struct cma_device *cma_dev, 218 unsigned int port) 219 { 220 if (port < rdma_start_port(cma_dev->device) || 221 port > rdma_end_port(cma_dev->device)) 222 return -EINVAL; 223 224 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; 225 } 226 227 int cma_set_default_gid_type(struct cma_device *cma_dev, 228 unsigned int port, 229 enum ib_gid_type default_gid_type) 230 { 231 unsigned long supported_gids; 232 233 if (port < rdma_start_port(cma_dev->device) || 234 port > rdma_end_port(cma_dev->device)) 235 return -EINVAL; 236 237 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); 238 239 if (!(supported_gids & 1 << default_gid_type)) 240 return -EINVAL; 241 242 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = 243 default_gid_type; 244 245 return 0; 246 } 247 248 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) 249 { 250 return cma_dev->device; 251 } 252 253 /* 254 * Device removal can occur at anytime, so we need extra handling to 255 * serialize notifying the user of device removal with other callbacks. 256 * We do this by disabling removal notification while a callback is in process, 257 * and reporting it after the callback completes. 258 */ 259 struct rdma_id_private { 260 struct rdma_cm_id id; 261 262 struct rdma_bind_list *bind_list; 263 struct hlist_node node; 264 struct list_head list; /* listen_any_list or cma_device.list */ 265 struct list_head listen_list; /* per device listens */ 266 struct cma_device *cma_dev; 267 struct list_head mc_list; 268 269 int internal_id; 270 enum rdma_cm_state state; 271 spinlock_t lock; 272 struct mutex qp_mutex; 273 274 struct completion comp; 275 atomic_t refcount; 276 struct mutex handler_mutex; 277 278 int backlog; 279 int timeout_ms; 280 struct ib_sa_query *query; 281 int query_id; 282 union { 283 struct ib_cm_id *ib; 284 struct iw_cm_id *iw; 285 } cm_id; 286 287 u32 seq_num; 288 u32 qkey; 289 u32 qp_num; 290 pid_t owner; 291 u32 options; 292 u8 srq; 293 u8 tos; 294 u8 reuseaddr; 295 u8 afonly; 296 enum ib_gid_type gid_type; 297 }; 298 299 struct cma_multicast { 300 struct rdma_id_private *id_priv; 301 union { 302 struct ib_sa_multicast *ib; 303 } multicast; 304 struct list_head list; 305 void *context; 306 struct sockaddr_storage addr; 307 struct kref mcref; 308 bool igmp_joined; 309 }; 310 311 struct cma_work { 312 struct work_struct work; 313 struct rdma_id_private *id; 314 enum rdma_cm_state old_state; 315 enum rdma_cm_state new_state; 316 struct rdma_cm_event event; 317 }; 318 319 struct cma_ndev_work { 320 struct work_struct work; 321 struct rdma_id_private *id; 322 struct rdma_cm_event event; 323 }; 324 325 struct iboe_mcast_work { 326 struct work_struct work; 327 struct rdma_id_private *id; 328 struct cma_multicast *mc; 329 }; 330 331 union cma_ip_addr { 332 struct in6_addr ip6; 333 struct { 334 __be32 pad[3]; 335 __be32 addr; 336 } ip4; 337 }; 338 339 struct cma_hdr { 340 u8 cma_version; 341 u8 ip_version; /* IP version: 7:4 */ 342 __be16 port; 343 union cma_ip_addr src_addr; 344 union cma_ip_addr dst_addr; 345 }; 346 347 #define CMA_VERSION 0x00 348 349 struct cma_req_info { 350 struct ib_device *device; 351 int port; 352 union ib_gid local_gid; 353 __be64 service_id; 354 u16 pkey; 355 bool has_gid:1; 356 }; 357 358 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) 359 { 360 unsigned long flags; 361 int ret; 362 363 spin_lock_irqsave(&id_priv->lock, flags); 364 ret = (id_priv->state == comp); 365 spin_unlock_irqrestore(&id_priv->lock, flags); 366 return ret; 367 } 368 369 static int cma_comp_exch(struct rdma_id_private *id_priv, 370 enum rdma_cm_state comp, enum rdma_cm_state exch) 371 { 372 unsigned long flags; 373 int ret; 374 375 spin_lock_irqsave(&id_priv->lock, flags); 376 if ((ret = (id_priv->state == comp))) 377 id_priv->state = exch; 378 spin_unlock_irqrestore(&id_priv->lock, flags); 379 return ret; 380 } 381 382 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, 383 enum rdma_cm_state exch) 384 { 385 unsigned long flags; 386 enum rdma_cm_state old; 387 388 spin_lock_irqsave(&id_priv->lock, flags); 389 old = id_priv->state; 390 id_priv->state = exch; 391 spin_unlock_irqrestore(&id_priv->lock, flags); 392 return old; 393 } 394 395 static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) 396 { 397 return hdr->ip_version >> 4; 398 } 399 400 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 401 { 402 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 403 } 404 405 static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) 406 { 407 struct in_device *in_dev = NULL; 408 409 if (ndev) { 410 rtnl_lock(); 411 in_dev = __in_dev_get_rtnl(ndev); 412 if (in_dev) { 413 if (join) 414 ip_mc_inc_group(in_dev, 415 *(__be32 *)(mgid->raw + 12)); 416 else 417 ip_mc_dec_group(in_dev, 418 *(__be32 *)(mgid->raw + 12)); 419 } 420 rtnl_unlock(); 421 } 422 return (in_dev) ? 0 : -ENODEV; 423 } 424 425 static void _cma_attach_to_dev(struct rdma_id_private *id_priv, 426 struct cma_device *cma_dev) 427 { 428 cma_ref_dev(cma_dev); 429 id_priv->cma_dev = cma_dev; 430 id_priv->gid_type = 0; 431 id_priv->id.device = cma_dev->device; 432 id_priv->id.route.addr.dev_addr.transport = 433 rdma_node_get_transport(cma_dev->device->node_type); 434 list_add_tail(&id_priv->list, &cma_dev->id_list); 435 } 436 437 static void cma_attach_to_dev(struct rdma_id_private *id_priv, 438 struct cma_device *cma_dev) 439 { 440 _cma_attach_to_dev(id_priv, cma_dev); 441 id_priv->gid_type = 442 cma_dev->default_gid_type[id_priv->id.port_num - 443 rdma_start_port(cma_dev->device)]; 444 } 445 446 void cma_deref_dev(struct cma_device *cma_dev) 447 { 448 if (atomic_dec_and_test(&cma_dev->refcount)) 449 complete(&cma_dev->comp); 450 } 451 452 static inline void release_mc(struct kref *kref) 453 { 454 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); 455 456 kfree(mc->multicast.ib); 457 kfree(mc); 458 } 459 460 static void cma_release_dev(struct rdma_id_private *id_priv) 461 { 462 mutex_lock(&lock); 463 list_del(&id_priv->list); 464 cma_deref_dev(id_priv->cma_dev); 465 id_priv->cma_dev = NULL; 466 mutex_unlock(&lock); 467 } 468 469 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) 470 { 471 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; 472 } 473 474 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) 475 { 476 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; 477 } 478 479 static inline unsigned short cma_family(struct rdma_id_private *id_priv) 480 { 481 return id_priv->id.route.addr.src_addr.ss_family; 482 } 483 484 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) 485 { 486 struct ib_sa_mcmember_rec rec; 487 int ret = 0; 488 489 if (id_priv->qkey) { 490 if (qkey && id_priv->qkey != qkey) 491 return -EINVAL; 492 return 0; 493 } 494 495 if (qkey) { 496 id_priv->qkey = qkey; 497 return 0; 498 } 499 500 switch (id_priv->id.ps) { 501 case RDMA_PS_UDP: 502 case RDMA_PS_IB: 503 id_priv->qkey = RDMA_UDP_QKEY; 504 break; 505 case RDMA_PS_IPOIB: 506 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 507 ret = ib_sa_get_mcmember_rec(id_priv->id.device, 508 id_priv->id.port_num, &rec.mgid, 509 &rec); 510 if (!ret) 511 id_priv->qkey = be32_to_cpu(rec.qkey); 512 break; 513 default: 514 break; 515 } 516 return ret; 517 } 518 519 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) 520 { 521 dev_addr->dev_type = ARPHRD_INFINIBAND; 522 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); 523 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); 524 } 525 526 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 527 { 528 int ret; 529 530 if (addr->sa_family != AF_IB) { 531 ret = rdma_translate_ip(addr, dev_addr, NULL); 532 } else { 533 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); 534 ret = 0; 535 } 536 537 return ret; 538 } 539 540 static inline int cma_validate_port(struct ib_device *device, u8 port, 541 enum ib_gid_type gid_type, 542 union ib_gid *gid, int dev_type, 543 int bound_if_index) 544 { 545 int ret = -ENODEV; 546 struct net_device *ndev = NULL; 547 548 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) 549 return ret; 550 551 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 552 return ret; 553 554 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 555 ndev = dev_get_by_index(&init_net, bound_if_index); 556 if (ndev && ndev->flags & IFF_LOOPBACK) { 557 pr_info("detected loopback device\n"); 558 dev_put(ndev); 559 560 if (!device->get_netdev) 561 return -EOPNOTSUPP; 562 563 ndev = device->get_netdev(device, port); 564 if (!ndev) 565 return -ENODEV; 566 } 567 } else { 568 gid_type = IB_GID_TYPE_IB; 569 } 570 571 ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, 572 ndev, NULL); 573 574 if (ndev) 575 dev_put(ndev); 576 577 return ret; 578 } 579 580 static int cma_acquire_dev(struct rdma_id_private *id_priv, 581 struct rdma_id_private *listen_id_priv) 582 { 583 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 584 struct cma_device *cma_dev; 585 union ib_gid gid, iboe_gid, *gidp; 586 int ret = -ENODEV; 587 u8 port; 588 589 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 590 id_priv->id.ps == RDMA_PS_IPOIB) 591 return -EINVAL; 592 593 mutex_lock(&lock); 594 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 595 &iboe_gid); 596 597 memcpy(&gid, dev_addr->src_dev_addr + 598 rdma_addr_gid_offset(dev_addr), sizeof gid); 599 600 if (listen_id_priv) { 601 cma_dev = listen_id_priv->cma_dev; 602 port = listen_id_priv->id.port_num; 603 gidp = rdma_protocol_roce(cma_dev->device, port) ? 604 &iboe_gid : &gid; 605 606 ret = cma_validate_port(cma_dev->device, port, 607 rdma_protocol_ib(cma_dev->device, port) ? 608 IB_GID_TYPE_IB : 609 listen_id_priv->gid_type, gidp, 610 dev_addr->dev_type, 611 dev_addr->bound_dev_if); 612 if (!ret) { 613 id_priv->id.port_num = port; 614 goto out; 615 } 616 } 617 618 list_for_each_entry(cma_dev, &dev_list, list) { 619 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { 620 if (listen_id_priv && 621 listen_id_priv->cma_dev == cma_dev && 622 listen_id_priv->id.port_num == port) 623 continue; 624 625 gidp = rdma_protocol_roce(cma_dev->device, port) ? 626 &iboe_gid : &gid; 627 628 ret = cma_validate_port(cma_dev->device, port, 629 rdma_protocol_ib(cma_dev->device, port) ? 630 IB_GID_TYPE_IB : 631 cma_dev->default_gid_type[port - 1], 632 gidp, dev_addr->dev_type, 633 dev_addr->bound_dev_if); 634 if (!ret) { 635 id_priv->id.port_num = port; 636 goto out; 637 } 638 } 639 } 640 641 out: 642 if (!ret) 643 cma_attach_to_dev(id_priv, cma_dev); 644 645 mutex_unlock(&lock); 646 return ret; 647 } 648 649 /* 650 * Select the source IB device and address to reach the destination IB address. 651 */ 652 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) 653 { 654 struct cma_device *cma_dev, *cur_dev; 655 struct sockaddr_ib *addr; 656 union ib_gid gid, sgid, *dgid; 657 u16 pkey, index; 658 u8 p; 659 int i; 660 661 cma_dev = NULL; 662 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); 663 dgid = (union ib_gid *) &addr->sib_addr; 664 pkey = ntohs(addr->sib_pkey); 665 666 list_for_each_entry(cur_dev, &dev_list, list) { 667 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 668 if (!rdma_cap_af_ib(cur_dev->device, p)) 669 continue; 670 671 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) 672 continue; 673 674 for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, 675 &gid, NULL); 676 i++) { 677 if (!memcmp(&gid, dgid, sizeof(gid))) { 678 cma_dev = cur_dev; 679 sgid = gid; 680 id_priv->id.port_num = p; 681 goto found; 682 } 683 684 if (!cma_dev && (gid.global.subnet_prefix == 685 dgid->global.subnet_prefix)) { 686 cma_dev = cur_dev; 687 sgid = gid; 688 id_priv->id.port_num = p; 689 } 690 } 691 } 692 } 693 694 if (!cma_dev) 695 return -ENODEV; 696 697 found: 698 cma_attach_to_dev(id_priv, cma_dev); 699 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 700 memcpy(&addr->sib_addr, &sgid, sizeof sgid); 701 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 702 return 0; 703 } 704 705 static void cma_deref_id(struct rdma_id_private *id_priv) 706 { 707 if (atomic_dec_and_test(&id_priv->refcount)) 708 complete(&id_priv->comp); 709 } 710 711 static int cma_disable_callback(struct rdma_id_private *id_priv, 712 enum rdma_cm_state state) 713 { 714 mutex_lock(&id_priv->handler_mutex); 715 if (id_priv->state != state) { 716 mutex_unlock(&id_priv->handler_mutex); 717 return -EINVAL; 718 } 719 return 0; 720 } 721 722 struct rdma_cm_id *rdma_create_id(struct net *net, 723 rdma_cm_event_handler event_handler, 724 void *context, enum rdma_port_space ps, 725 enum ib_qp_type qp_type) 726 { 727 struct rdma_id_private *id_priv; 728 729 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 730 if (!id_priv) 731 return ERR_PTR(-ENOMEM); 732 733 id_priv->owner = task_pid_nr(current); 734 id_priv->state = RDMA_CM_IDLE; 735 id_priv->id.context = context; 736 id_priv->id.event_handler = event_handler; 737 id_priv->id.ps = ps; 738 id_priv->id.qp_type = qp_type; 739 spin_lock_init(&id_priv->lock); 740 mutex_init(&id_priv->qp_mutex); 741 init_completion(&id_priv->comp); 742 atomic_set(&id_priv->refcount, 1); 743 mutex_init(&id_priv->handler_mutex); 744 INIT_LIST_HEAD(&id_priv->listen_list); 745 INIT_LIST_HEAD(&id_priv->mc_list); 746 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 747 id_priv->id.route.addr.dev_addr.net = get_net(net); 748 749 return &id_priv->id; 750 } 751 EXPORT_SYMBOL(rdma_create_id); 752 753 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 754 { 755 struct ib_qp_attr qp_attr; 756 int qp_attr_mask, ret; 757 758 qp_attr.qp_state = IB_QPS_INIT; 759 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 760 if (ret) 761 return ret; 762 763 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 764 if (ret) 765 return ret; 766 767 qp_attr.qp_state = IB_QPS_RTR; 768 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 769 if (ret) 770 return ret; 771 772 qp_attr.qp_state = IB_QPS_RTS; 773 qp_attr.sq_psn = 0; 774 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 775 776 return ret; 777 } 778 779 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 780 { 781 struct ib_qp_attr qp_attr; 782 int qp_attr_mask, ret; 783 784 qp_attr.qp_state = IB_QPS_INIT; 785 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 786 if (ret) 787 return ret; 788 789 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 790 } 791 792 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 793 struct ib_qp_init_attr *qp_init_attr) 794 { 795 struct rdma_id_private *id_priv; 796 struct ib_qp *qp; 797 int ret; 798 799 id_priv = container_of(id, struct rdma_id_private, id); 800 if (id->device != pd->device) 801 return -EINVAL; 802 803 qp = ib_create_qp(pd, qp_init_attr); 804 if (IS_ERR(qp)) 805 return PTR_ERR(qp); 806 807 if (id->qp_type == IB_QPT_UD) 808 ret = cma_init_ud_qp(id_priv, qp); 809 else 810 ret = cma_init_conn_qp(id_priv, qp); 811 if (ret) 812 goto err; 813 814 id->qp = qp; 815 id_priv->qp_num = qp->qp_num; 816 id_priv->srq = (qp->srq != NULL); 817 return 0; 818 err: 819 ib_destroy_qp(qp); 820 return ret; 821 } 822 EXPORT_SYMBOL(rdma_create_qp); 823 824 void rdma_destroy_qp(struct rdma_cm_id *id) 825 { 826 struct rdma_id_private *id_priv; 827 828 id_priv = container_of(id, struct rdma_id_private, id); 829 mutex_lock(&id_priv->qp_mutex); 830 ib_destroy_qp(id_priv->id.qp); 831 id_priv->id.qp = NULL; 832 mutex_unlock(&id_priv->qp_mutex); 833 } 834 EXPORT_SYMBOL(rdma_destroy_qp); 835 836 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 837 struct rdma_conn_param *conn_param) 838 { 839 struct ib_qp_attr qp_attr; 840 int qp_attr_mask, ret; 841 union ib_gid sgid; 842 843 mutex_lock(&id_priv->qp_mutex); 844 if (!id_priv->id.qp) { 845 ret = 0; 846 goto out; 847 } 848 849 /* Need to update QP attributes from default values. */ 850 qp_attr.qp_state = IB_QPS_INIT; 851 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 852 if (ret) 853 goto out; 854 855 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 856 if (ret) 857 goto out; 858 859 qp_attr.qp_state = IB_QPS_RTR; 860 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 861 if (ret) 862 goto out; 863 864 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, 865 qp_attr.ah_attr.grh.sgid_index, &sgid, NULL); 866 if (ret) 867 goto out; 868 869 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); 870 871 if (conn_param) 872 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 873 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 874 out: 875 mutex_unlock(&id_priv->qp_mutex); 876 return ret; 877 } 878 879 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 880 struct rdma_conn_param *conn_param) 881 { 882 struct ib_qp_attr qp_attr; 883 int qp_attr_mask, ret; 884 885 mutex_lock(&id_priv->qp_mutex); 886 if (!id_priv->id.qp) { 887 ret = 0; 888 goto out; 889 } 890 891 qp_attr.qp_state = IB_QPS_RTS; 892 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 893 if (ret) 894 goto out; 895 896 if (conn_param) 897 qp_attr.max_rd_atomic = conn_param->initiator_depth; 898 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 899 out: 900 mutex_unlock(&id_priv->qp_mutex); 901 return ret; 902 } 903 904 static int cma_modify_qp_err(struct rdma_id_private *id_priv) 905 { 906 struct ib_qp_attr qp_attr; 907 int ret; 908 909 mutex_lock(&id_priv->qp_mutex); 910 if (!id_priv->id.qp) { 911 ret = 0; 912 goto out; 913 } 914 915 qp_attr.qp_state = IB_QPS_ERR; 916 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 917 out: 918 mutex_unlock(&id_priv->qp_mutex); 919 return ret; 920 } 921 922 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 923 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 924 { 925 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 926 int ret; 927 u16 pkey; 928 929 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) 930 pkey = 0xffff; 931 else 932 pkey = ib_addr_get_pkey(dev_addr); 933 934 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 935 pkey, &qp_attr->pkey_index); 936 if (ret) 937 return ret; 938 939 qp_attr->port_num = id_priv->id.port_num; 940 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 941 942 if (id_priv->id.qp_type == IB_QPT_UD) { 943 ret = cma_set_qkey(id_priv, 0); 944 if (ret) 945 return ret; 946 947 qp_attr->qkey = id_priv->qkey; 948 *qp_attr_mask |= IB_QP_QKEY; 949 } else { 950 qp_attr->qp_access_flags = 0; 951 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 952 } 953 return 0; 954 } 955 956 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 957 int *qp_attr_mask) 958 { 959 struct rdma_id_private *id_priv; 960 int ret = 0; 961 962 id_priv = container_of(id, struct rdma_id_private, id); 963 if (rdma_cap_ib_cm(id->device, id->port_num)) { 964 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) 965 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 966 else 967 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 968 qp_attr_mask); 969 970 if (qp_attr->qp_state == IB_QPS_RTR) 971 qp_attr->rq_psn = id_priv->seq_num; 972 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 973 if (!id_priv->cm_id.iw) { 974 qp_attr->qp_access_flags = 0; 975 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 976 } else 977 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 978 qp_attr_mask); 979 } else 980 ret = -ENOSYS; 981 982 return ret; 983 } 984 EXPORT_SYMBOL(rdma_init_qp_attr); 985 986 static inline int cma_zero_addr(struct sockaddr *addr) 987 { 988 switch (addr->sa_family) { 989 case AF_INET: 990 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); 991 case AF_INET6: 992 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr); 993 case AF_IB: 994 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr); 995 default: 996 return 0; 997 } 998 } 999 1000 static inline int cma_loopback_addr(struct sockaddr *addr) 1001 { 1002 switch (addr->sa_family) { 1003 case AF_INET: 1004 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); 1005 case AF_INET6: 1006 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr); 1007 case AF_IB: 1008 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr); 1009 default: 1010 return 0; 1011 } 1012 } 1013 1014 static inline int cma_any_addr(struct sockaddr *addr) 1015 { 1016 return cma_zero_addr(addr) || cma_loopback_addr(addr); 1017 } 1018 1019 static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) 1020 { 1021 if (src->sa_family != dst->sa_family) 1022 return -1; 1023 1024 switch (src->sa_family) { 1025 case AF_INET: 1026 return ((struct sockaddr_in *) src)->sin_addr.s_addr != 1027 ((struct sockaddr_in *) dst)->sin_addr.s_addr; 1028 case AF_INET6: 1029 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, 1030 &((struct sockaddr_in6 *) dst)->sin6_addr); 1031 default: 1032 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, 1033 &((struct sockaddr_ib *) dst)->sib_addr); 1034 } 1035 } 1036 1037 static __be16 cma_port(struct sockaddr *addr) 1038 { 1039 struct sockaddr_ib *sib; 1040 1041 switch (addr->sa_family) { 1042 case AF_INET: 1043 return ((struct sockaddr_in *) addr)->sin_port; 1044 case AF_INET6: 1045 return ((struct sockaddr_in6 *) addr)->sin6_port; 1046 case AF_IB: 1047 sib = (struct sockaddr_ib *) addr; 1048 return htons((u16) (be64_to_cpu(sib->sib_sid) & 1049 be64_to_cpu(sib->sib_sid_mask))); 1050 default: 1051 return 0; 1052 } 1053 } 1054 1055 static inline int cma_any_port(struct sockaddr *addr) 1056 { 1057 return !cma_port(addr); 1058 } 1059 1060 static void cma_save_ib_info(struct sockaddr *src_addr, 1061 struct sockaddr *dst_addr, 1062 struct rdma_cm_id *listen_id, 1063 struct ib_sa_path_rec *path) 1064 { 1065 struct sockaddr_ib *listen_ib, *ib; 1066 1067 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 1068 if (src_addr) { 1069 ib = (struct sockaddr_ib *)src_addr; 1070 ib->sib_family = AF_IB; 1071 if (path) { 1072 ib->sib_pkey = path->pkey; 1073 ib->sib_flowinfo = path->flow_label; 1074 memcpy(&ib->sib_addr, &path->sgid, 16); 1075 ib->sib_sid = path->service_id; 1076 ib->sib_scope_id = 0; 1077 } else { 1078 ib->sib_pkey = listen_ib->sib_pkey; 1079 ib->sib_flowinfo = listen_ib->sib_flowinfo; 1080 ib->sib_addr = listen_ib->sib_addr; 1081 ib->sib_sid = listen_ib->sib_sid; 1082 ib->sib_scope_id = listen_ib->sib_scope_id; 1083 } 1084 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 1085 } 1086 if (dst_addr) { 1087 ib = (struct sockaddr_ib *)dst_addr; 1088 ib->sib_family = AF_IB; 1089 if (path) { 1090 ib->sib_pkey = path->pkey; 1091 ib->sib_flowinfo = path->flow_label; 1092 memcpy(&ib->sib_addr, &path->dgid, 16); 1093 } 1094 } 1095 } 1096 1097 static void cma_save_ip4_info(struct sockaddr *src_addr, 1098 struct sockaddr *dst_addr, 1099 struct cma_hdr *hdr, 1100 __be16 local_port) 1101 { 1102 struct sockaddr_in *ip4; 1103 1104 if (src_addr) { 1105 ip4 = (struct sockaddr_in *)src_addr; 1106 ip4->sin_family = AF_INET; 1107 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; 1108 ip4->sin_port = local_port; 1109 } 1110 1111 if (dst_addr) { 1112 ip4 = (struct sockaddr_in *)dst_addr; 1113 ip4->sin_family = AF_INET; 1114 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; 1115 ip4->sin_port = hdr->port; 1116 } 1117 } 1118 1119 static void cma_save_ip6_info(struct sockaddr *src_addr, 1120 struct sockaddr *dst_addr, 1121 struct cma_hdr *hdr, 1122 __be16 local_port) 1123 { 1124 struct sockaddr_in6 *ip6; 1125 1126 if (src_addr) { 1127 ip6 = (struct sockaddr_in6 *)src_addr; 1128 ip6->sin6_family = AF_INET6; 1129 ip6->sin6_addr = hdr->dst_addr.ip6; 1130 ip6->sin6_port = local_port; 1131 } 1132 1133 if (dst_addr) { 1134 ip6 = (struct sockaddr_in6 *)dst_addr; 1135 ip6->sin6_family = AF_INET6; 1136 ip6->sin6_addr = hdr->src_addr.ip6; 1137 ip6->sin6_port = hdr->port; 1138 } 1139 } 1140 1141 static u16 cma_port_from_service_id(__be64 service_id) 1142 { 1143 return (u16)be64_to_cpu(service_id); 1144 } 1145 1146 static int cma_save_ip_info(struct sockaddr *src_addr, 1147 struct sockaddr *dst_addr, 1148 struct ib_cm_event *ib_event, 1149 __be64 service_id) 1150 { 1151 struct cma_hdr *hdr; 1152 __be16 port; 1153 1154 hdr = ib_event->private_data; 1155 if (hdr->cma_version != CMA_VERSION) 1156 return -EINVAL; 1157 1158 port = htons(cma_port_from_service_id(service_id)); 1159 1160 switch (cma_get_ip_ver(hdr)) { 1161 case 4: 1162 cma_save_ip4_info(src_addr, dst_addr, hdr, port); 1163 break; 1164 case 6: 1165 cma_save_ip6_info(src_addr, dst_addr, hdr, port); 1166 break; 1167 default: 1168 return -EAFNOSUPPORT; 1169 } 1170 1171 return 0; 1172 } 1173 1174 static int cma_save_net_info(struct sockaddr *src_addr, 1175 struct sockaddr *dst_addr, 1176 struct rdma_cm_id *listen_id, 1177 struct ib_cm_event *ib_event, 1178 sa_family_t sa_family, __be64 service_id) 1179 { 1180 if (sa_family == AF_IB) { 1181 if (ib_event->event == IB_CM_REQ_RECEIVED) 1182 cma_save_ib_info(src_addr, dst_addr, listen_id, 1183 ib_event->param.req_rcvd.primary_path); 1184 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 1185 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); 1186 return 0; 1187 } 1188 1189 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); 1190 } 1191 1192 static int cma_save_req_info(const struct ib_cm_event *ib_event, 1193 struct cma_req_info *req) 1194 { 1195 const struct ib_cm_req_event_param *req_param = 1196 &ib_event->param.req_rcvd; 1197 const struct ib_cm_sidr_req_event_param *sidr_param = 1198 &ib_event->param.sidr_req_rcvd; 1199 1200 switch (ib_event->event) { 1201 case IB_CM_REQ_RECEIVED: 1202 req->device = req_param->listen_id->device; 1203 req->port = req_param->port; 1204 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1205 sizeof(req->local_gid)); 1206 req->has_gid = true; 1207 req->service_id = req_param->primary_path->service_id; 1208 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1209 if (req->pkey != req_param->bth_pkey) 1210 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" 1211 "RDMA CMA: in the future this may cause the request to be dropped\n", 1212 req_param->bth_pkey, req->pkey); 1213 break; 1214 case IB_CM_SIDR_REQ_RECEIVED: 1215 req->device = sidr_param->listen_id->device; 1216 req->port = sidr_param->port; 1217 req->has_gid = false; 1218 req->service_id = sidr_param->service_id; 1219 req->pkey = sidr_param->pkey; 1220 if (req->pkey != sidr_param->bth_pkey) 1221 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" 1222 "RDMA CMA: in the future this may cause the request to be dropped\n", 1223 sidr_param->bth_pkey, req->pkey); 1224 break; 1225 default: 1226 return -EINVAL; 1227 } 1228 1229 return 0; 1230 } 1231 1232 static bool validate_ipv4_net_dev(struct net_device *net_dev, 1233 const struct sockaddr_in *dst_addr, 1234 const struct sockaddr_in *src_addr) 1235 { 1236 __be32 daddr = dst_addr->sin_addr.s_addr, 1237 saddr = src_addr->sin_addr.s_addr; 1238 struct fib_result res; 1239 struct flowi4 fl4; 1240 int err; 1241 bool ret; 1242 1243 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1244 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || 1245 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || 1246 ipv4_is_loopback(saddr)) 1247 return false; 1248 1249 memset(&fl4, 0, sizeof(fl4)); 1250 fl4.flowi4_iif = net_dev->ifindex; 1251 fl4.daddr = daddr; 1252 fl4.saddr = saddr; 1253 1254 rcu_read_lock(); 1255 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1256 ret = err == 0 && FIB_RES_DEV(res) == net_dev; 1257 rcu_read_unlock(); 1258 1259 return ret; 1260 } 1261 1262 static bool validate_ipv6_net_dev(struct net_device *net_dev, 1263 const struct sockaddr_in6 *dst_addr, 1264 const struct sockaddr_in6 *src_addr) 1265 { 1266 #if IS_ENABLED(CONFIG_IPV6) 1267 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & 1268 IPV6_ADDR_LINKLOCAL; 1269 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, 1270 &src_addr->sin6_addr, net_dev->ifindex, 1271 strict); 1272 bool ret; 1273 1274 if (!rt) 1275 return false; 1276 1277 ret = rt->rt6i_idev->dev == net_dev; 1278 ip6_rt_put(rt); 1279 1280 return ret; 1281 #else 1282 return false; 1283 #endif 1284 } 1285 1286 static bool validate_net_dev(struct net_device *net_dev, 1287 const struct sockaddr *daddr, 1288 const struct sockaddr *saddr) 1289 { 1290 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; 1291 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; 1292 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; 1293 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; 1294 1295 switch (daddr->sa_family) { 1296 case AF_INET: 1297 return saddr->sa_family == AF_INET && 1298 validate_ipv4_net_dev(net_dev, daddr4, saddr4); 1299 1300 case AF_INET6: 1301 return saddr->sa_family == AF_INET6 && 1302 validate_ipv6_net_dev(net_dev, daddr6, saddr6); 1303 1304 default: 1305 return false; 1306 } 1307 } 1308 1309 static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, 1310 const struct cma_req_info *req) 1311 { 1312 struct sockaddr_storage listen_addr_storage, src_addr_storage; 1313 struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, 1314 *src_addr = (struct sockaddr *)&src_addr_storage; 1315 struct net_device *net_dev; 1316 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; 1317 int err; 1318 1319 err = cma_save_ip_info(listen_addr, src_addr, ib_event, 1320 req->service_id); 1321 if (err) 1322 return ERR_PTR(err); 1323 1324 net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey, 1325 gid, listen_addr); 1326 if (!net_dev) 1327 return ERR_PTR(-ENODEV); 1328 1329 if (!validate_net_dev(net_dev, listen_addr, src_addr)) { 1330 dev_put(net_dev); 1331 return ERR_PTR(-EHOSTUNREACH); 1332 } 1333 1334 return net_dev; 1335 } 1336 1337 static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id) 1338 { 1339 return (be64_to_cpu(service_id) >> 16) & 0xffff; 1340 } 1341 1342 static bool cma_match_private_data(struct rdma_id_private *id_priv, 1343 const struct cma_hdr *hdr) 1344 { 1345 struct sockaddr *addr = cma_src_addr(id_priv); 1346 __be32 ip4_addr; 1347 struct in6_addr ip6_addr; 1348 1349 if (cma_any_addr(addr) && !id_priv->afonly) 1350 return true; 1351 1352 switch (addr->sa_family) { 1353 case AF_INET: 1354 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; 1355 if (cma_get_ip_ver(hdr) != 4) 1356 return false; 1357 if (!cma_any_addr(addr) && 1358 hdr->dst_addr.ip4.addr != ip4_addr) 1359 return false; 1360 break; 1361 case AF_INET6: 1362 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; 1363 if (cma_get_ip_ver(hdr) != 6) 1364 return false; 1365 if (!cma_any_addr(addr) && 1366 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) 1367 return false; 1368 break; 1369 case AF_IB: 1370 return true; 1371 default: 1372 return false; 1373 } 1374 1375 return true; 1376 } 1377 1378 static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) 1379 { 1380 enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); 1381 enum rdma_transport_type transport = 1382 rdma_node_get_transport(device->node_type); 1383 1384 return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; 1385 } 1386 1387 static bool cma_protocol_roce(const struct rdma_cm_id *id) 1388 { 1389 struct ib_device *device = id->device; 1390 const int port_num = id->port_num ?: rdma_start_port(device); 1391 1392 return cma_protocol_roce_dev_port(device, port_num); 1393 } 1394 1395 static bool cma_match_net_dev(const struct rdma_cm_id *id, 1396 const struct net_device *net_dev, 1397 u8 port_num) 1398 { 1399 const struct rdma_addr *addr = &id->route.addr; 1400 1401 if (!net_dev) 1402 /* This request is an AF_IB request or a RoCE request */ 1403 return (!id->port_num || id->port_num == port_num) && 1404 (addr->src_addr.ss_family == AF_IB || 1405 cma_protocol_roce_dev_port(id->device, port_num)); 1406 1407 return !addr->dev_addr.bound_dev_if || 1408 (net_eq(dev_net(net_dev), addr->dev_addr.net) && 1409 addr->dev_addr.bound_dev_if == net_dev->ifindex); 1410 } 1411 1412 static struct rdma_id_private *cma_find_listener( 1413 const struct rdma_bind_list *bind_list, 1414 const struct ib_cm_id *cm_id, 1415 const struct ib_cm_event *ib_event, 1416 const struct cma_req_info *req, 1417 const struct net_device *net_dev) 1418 { 1419 struct rdma_id_private *id_priv, *id_priv_dev; 1420 1421 if (!bind_list) 1422 return ERR_PTR(-EINVAL); 1423 1424 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1425 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1426 if (id_priv->id.device == cm_id->device && 1427 cma_match_net_dev(&id_priv->id, net_dev, req->port)) 1428 return id_priv; 1429 list_for_each_entry(id_priv_dev, 1430 &id_priv->listen_list, 1431 listen_list) { 1432 if (id_priv_dev->id.device == cm_id->device && 1433 cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) 1434 return id_priv_dev; 1435 } 1436 } 1437 } 1438 1439 return ERR_PTR(-EINVAL); 1440 } 1441 1442 static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, 1443 struct ib_cm_event *ib_event, 1444 struct net_device **net_dev) 1445 { 1446 struct cma_req_info req; 1447 struct rdma_bind_list *bind_list; 1448 struct rdma_id_private *id_priv; 1449 int err; 1450 1451 err = cma_save_req_info(ib_event, &req); 1452 if (err) 1453 return ERR_PTR(err); 1454 1455 *net_dev = cma_get_net_dev(ib_event, &req); 1456 if (IS_ERR(*net_dev)) { 1457 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { 1458 /* Assuming the protocol is AF_IB */ 1459 *net_dev = NULL; 1460 } else if (cma_protocol_roce_dev_port(req.device, req.port)) { 1461 /* TODO find the net dev matching the request parameters 1462 * through the RoCE GID table */ 1463 *net_dev = NULL; 1464 } else { 1465 return ERR_CAST(*net_dev); 1466 } 1467 } 1468 1469 bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, 1470 rdma_ps_from_service_id(req.service_id), 1471 cma_port_from_service_id(req.service_id)); 1472 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1473 if (IS_ERR(id_priv) && *net_dev) { 1474 dev_put(*net_dev); 1475 *net_dev = NULL; 1476 } 1477 1478 return id_priv; 1479 } 1480 1481 static inline int cma_user_data_offset(struct rdma_id_private *id_priv) 1482 { 1483 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); 1484 } 1485 1486 static void cma_cancel_route(struct rdma_id_private *id_priv) 1487 { 1488 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { 1489 if (id_priv->query) 1490 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 1491 } 1492 } 1493 1494 static void cma_cancel_listens(struct rdma_id_private *id_priv) 1495 { 1496 struct rdma_id_private *dev_id_priv; 1497 1498 /* 1499 * Remove from listen_any_list to prevent added devices from spawning 1500 * additional listen requests. 1501 */ 1502 mutex_lock(&lock); 1503 list_del(&id_priv->list); 1504 1505 while (!list_empty(&id_priv->listen_list)) { 1506 dev_id_priv = list_entry(id_priv->listen_list.next, 1507 struct rdma_id_private, listen_list); 1508 /* sync with device removal to avoid duplicate destruction */ 1509 list_del_init(&dev_id_priv->list); 1510 list_del(&dev_id_priv->listen_list); 1511 mutex_unlock(&lock); 1512 1513 rdma_destroy_id(&dev_id_priv->id); 1514 mutex_lock(&lock); 1515 } 1516 mutex_unlock(&lock); 1517 } 1518 1519 static void cma_cancel_operation(struct rdma_id_private *id_priv, 1520 enum rdma_cm_state state) 1521 { 1522 switch (state) { 1523 case RDMA_CM_ADDR_QUERY: 1524 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1525 break; 1526 case RDMA_CM_ROUTE_QUERY: 1527 cma_cancel_route(id_priv); 1528 break; 1529 case RDMA_CM_LISTEN: 1530 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) 1531 cma_cancel_listens(id_priv); 1532 break; 1533 default: 1534 break; 1535 } 1536 } 1537 1538 static void cma_release_port(struct rdma_id_private *id_priv) 1539 { 1540 struct rdma_bind_list *bind_list = id_priv->bind_list; 1541 struct net *net = id_priv->id.route.addr.dev_addr.net; 1542 1543 if (!bind_list) 1544 return; 1545 1546 mutex_lock(&lock); 1547 hlist_del(&id_priv->node); 1548 if (hlist_empty(&bind_list->owners)) { 1549 cma_ps_remove(net, bind_list->ps, bind_list->port); 1550 kfree(bind_list); 1551 } 1552 mutex_unlock(&lock); 1553 } 1554 1555 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 1556 { 1557 struct cma_multicast *mc; 1558 1559 while (!list_empty(&id_priv->mc_list)) { 1560 mc = container_of(id_priv->mc_list.next, 1561 struct cma_multicast, list); 1562 list_del(&mc->list); 1563 if (rdma_cap_ib_mcast(id_priv->cma_dev->device, 1564 id_priv->id.port_num)) { 1565 ib_sa_free_multicast(mc->multicast.ib); 1566 kfree(mc); 1567 } else { 1568 if (mc->igmp_joined) { 1569 struct rdma_dev_addr *dev_addr = 1570 &id_priv->id.route.addr.dev_addr; 1571 struct net_device *ndev = NULL; 1572 1573 if (dev_addr->bound_dev_if) 1574 ndev = dev_get_by_index(&init_net, 1575 dev_addr->bound_dev_if); 1576 if (ndev) { 1577 cma_igmp_send(ndev, 1578 &mc->multicast.ib->rec.mgid, 1579 false); 1580 dev_put(ndev); 1581 } 1582 } 1583 kref_put(&mc->mcref, release_mc); 1584 } 1585 } 1586 } 1587 1588 void rdma_destroy_id(struct rdma_cm_id *id) 1589 { 1590 struct rdma_id_private *id_priv; 1591 enum rdma_cm_state state; 1592 1593 id_priv = container_of(id, struct rdma_id_private, id); 1594 state = cma_exch(id_priv, RDMA_CM_DESTROYING); 1595 cma_cancel_operation(id_priv, state); 1596 1597 /* 1598 * Wait for any active callback to finish. New callbacks will find 1599 * the id_priv state set to destroying and abort. 1600 */ 1601 mutex_lock(&id_priv->handler_mutex); 1602 mutex_unlock(&id_priv->handler_mutex); 1603 1604 if (id_priv->cma_dev) { 1605 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 1606 if (id_priv->cm_id.ib) 1607 ib_destroy_cm_id(id_priv->cm_id.ib); 1608 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 1609 if (id_priv->cm_id.iw) 1610 iw_destroy_cm_id(id_priv->cm_id.iw); 1611 } 1612 cma_leave_mc_groups(id_priv); 1613 cma_release_dev(id_priv); 1614 } 1615 1616 cma_release_port(id_priv); 1617 cma_deref_id(id_priv); 1618 wait_for_completion(&id_priv->comp); 1619 1620 if (id_priv->internal_id) 1621 cma_deref_id(id_priv->id.context); 1622 1623 kfree(id_priv->id.route.path_rec); 1624 put_net(id_priv->id.route.addr.dev_addr.net); 1625 kfree(id_priv); 1626 } 1627 EXPORT_SYMBOL(rdma_destroy_id); 1628 1629 static int cma_rep_recv(struct rdma_id_private *id_priv) 1630 { 1631 int ret; 1632 1633 ret = cma_modify_qp_rtr(id_priv, NULL); 1634 if (ret) 1635 goto reject; 1636 1637 ret = cma_modify_qp_rts(id_priv, NULL); 1638 if (ret) 1639 goto reject; 1640 1641 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 1642 if (ret) 1643 goto reject; 1644 1645 return 0; 1646 reject: 1647 cma_modify_qp_err(id_priv); 1648 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 1649 NULL, 0, NULL, 0); 1650 return ret; 1651 } 1652 1653 static void cma_set_rep_event_data(struct rdma_cm_event *event, 1654 struct ib_cm_rep_event_param *rep_data, 1655 void *private_data) 1656 { 1657 event->param.conn.private_data = private_data; 1658 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 1659 event->param.conn.responder_resources = rep_data->responder_resources; 1660 event->param.conn.initiator_depth = rep_data->initiator_depth; 1661 event->param.conn.flow_control = rep_data->flow_control; 1662 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 1663 event->param.conn.srq = rep_data->srq; 1664 event->param.conn.qp_num = rep_data->remote_qpn; 1665 } 1666 1667 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1668 { 1669 struct rdma_id_private *id_priv = cm_id->context; 1670 struct rdma_cm_event event; 1671 int ret = 0; 1672 1673 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1674 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || 1675 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1676 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) 1677 return 0; 1678 1679 memset(&event, 0, sizeof event); 1680 switch (ib_event->event) { 1681 case IB_CM_REQ_ERROR: 1682 case IB_CM_REP_ERROR: 1683 event.event = RDMA_CM_EVENT_UNREACHABLE; 1684 event.status = -ETIMEDOUT; 1685 break; 1686 case IB_CM_REP_RECEIVED: 1687 if (id_priv->id.qp) { 1688 event.status = cma_rep_recv(id_priv); 1689 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 1690 RDMA_CM_EVENT_ESTABLISHED; 1691 } else { 1692 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 1693 } 1694 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 1695 ib_event->private_data); 1696 break; 1697 case IB_CM_RTU_RECEIVED: 1698 case IB_CM_USER_ESTABLISHED: 1699 event.event = RDMA_CM_EVENT_ESTABLISHED; 1700 break; 1701 case IB_CM_DREQ_ERROR: 1702 event.status = -ETIMEDOUT; /* fall through */ 1703 case IB_CM_DREQ_RECEIVED: 1704 case IB_CM_DREP_RECEIVED: 1705 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 1706 RDMA_CM_DISCONNECT)) 1707 goto out; 1708 event.event = RDMA_CM_EVENT_DISCONNECTED; 1709 break; 1710 case IB_CM_TIMEWAIT_EXIT: 1711 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 1712 break; 1713 case IB_CM_MRA_RECEIVED: 1714 /* ignore event */ 1715 goto out; 1716 case IB_CM_REJ_RECEIVED: 1717 cma_modify_qp_err(id_priv); 1718 event.status = ib_event->param.rej_rcvd.reason; 1719 event.event = RDMA_CM_EVENT_REJECTED; 1720 event.param.conn.private_data = ib_event->private_data; 1721 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 1722 break; 1723 default: 1724 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 1725 ib_event->event); 1726 goto out; 1727 } 1728 1729 ret = id_priv->id.event_handler(&id_priv->id, &event); 1730 if (ret) { 1731 /* Destroy the CM ID by returning a non-zero value. */ 1732 id_priv->cm_id.ib = NULL; 1733 cma_exch(id_priv, RDMA_CM_DESTROYING); 1734 mutex_unlock(&id_priv->handler_mutex); 1735 rdma_destroy_id(&id_priv->id); 1736 return ret; 1737 } 1738 out: 1739 mutex_unlock(&id_priv->handler_mutex); 1740 return ret; 1741 } 1742 1743 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 1744 struct ib_cm_event *ib_event, 1745 struct net_device *net_dev) 1746 { 1747 struct rdma_id_private *id_priv; 1748 struct rdma_cm_id *id; 1749 struct rdma_route *rt; 1750 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1751 const __be64 service_id = 1752 ib_event->param.req_rcvd.primary_path->service_id; 1753 int ret; 1754 1755 id = rdma_create_id(listen_id->route.addr.dev_addr.net, 1756 listen_id->event_handler, listen_id->context, 1757 listen_id->ps, ib_event->param.req_rcvd.qp_type); 1758 if (IS_ERR(id)) 1759 return NULL; 1760 1761 id_priv = container_of(id, struct rdma_id_private, id); 1762 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1763 (struct sockaddr *)&id->route.addr.dst_addr, 1764 listen_id, ib_event, ss_family, service_id)) 1765 goto err; 1766 1767 rt = &id->route; 1768 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; 1769 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 1770 GFP_KERNEL); 1771 if (!rt->path_rec) 1772 goto err; 1773 1774 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 1775 if (rt->num_paths == 2) 1776 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1777 1778 if (net_dev) { 1779 ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL); 1780 if (ret) 1781 goto err; 1782 } else { 1783 if (!cma_protocol_roce(listen_id) && 1784 cma_any_addr(cma_src_addr(id_priv))) { 1785 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 1786 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 1787 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 1788 } else if (!cma_any_addr(cma_src_addr(id_priv))) { 1789 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 1790 if (ret) 1791 goto err; 1792 } 1793 } 1794 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1795 1796 id_priv->state = RDMA_CM_CONNECT; 1797 return id_priv; 1798 1799 err: 1800 rdma_destroy_id(id); 1801 return NULL; 1802 } 1803 1804 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 1805 struct ib_cm_event *ib_event, 1806 struct net_device *net_dev) 1807 { 1808 struct rdma_id_private *id_priv; 1809 struct rdma_cm_id *id; 1810 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1811 struct net *net = listen_id->route.addr.dev_addr.net; 1812 int ret; 1813 1814 id = rdma_create_id(net, listen_id->event_handler, listen_id->context, 1815 listen_id->ps, IB_QPT_UD); 1816 if (IS_ERR(id)) 1817 return NULL; 1818 1819 id_priv = container_of(id, struct rdma_id_private, id); 1820 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1821 (struct sockaddr *)&id->route.addr.dst_addr, 1822 listen_id, ib_event, ss_family, 1823 ib_event->param.sidr_req_rcvd.service_id)) 1824 goto err; 1825 1826 if (net_dev) { 1827 ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL); 1828 if (ret) 1829 goto err; 1830 } else { 1831 if (!cma_any_addr(cma_src_addr(id_priv))) { 1832 ret = cma_translate_addr(cma_src_addr(id_priv), 1833 &id->route.addr.dev_addr); 1834 if (ret) 1835 goto err; 1836 } 1837 } 1838 1839 id_priv->state = RDMA_CM_CONNECT; 1840 return id_priv; 1841 err: 1842 rdma_destroy_id(id); 1843 return NULL; 1844 } 1845 1846 static void cma_set_req_event_data(struct rdma_cm_event *event, 1847 struct ib_cm_req_event_param *req_data, 1848 void *private_data, int offset) 1849 { 1850 event->param.conn.private_data = private_data + offset; 1851 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 1852 event->param.conn.responder_resources = req_data->responder_resources; 1853 event->param.conn.initiator_depth = req_data->initiator_depth; 1854 event->param.conn.flow_control = req_data->flow_control; 1855 event->param.conn.retry_count = req_data->retry_count; 1856 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 1857 event->param.conn.srq = req_data->srq; 1858 event->param.conn.qp_num = req_data->remote_qpn; 1859 } 1860 1861 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1862 { 1863 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 1864 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1865 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1866 (id->qp_type == IB_QPT_UD)) || 1867 (!id->qp_type)); 1868 } 1869 1870 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1871 { 1872 struct rdma_id_private *listen_id, *conn_id; 1873 struct rdma_cm_event event; 1874 struct net_device *net_dev; 1875 int offset, ret; 1876 1877 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); 1878 if (IS_ERR(listen_id)) 1879 return PTR_ERR(listen_id); 1880 1881 if (!cma_check_req_qp_type(&listen_id->id, ib_event)) { 1882 ret = -EINVAL; 1883 goto net_dev_put; 1884 } 1885 1886 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { 1887 ret = -ECONNABORTED; 1888 goto net_dev_put; 1889 } 1890 1891 memset(&event, 0, sizeof event); 1892 offset = cma_user_data_offset(listen_id); 1893 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1894 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 1895 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev); 1896 event.param.ud.private_data = ib_event->private_data + offset; 1897 event.param.ud.private_data_len = 1898 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 1899 } else { 1900 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev); 1901 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 1902 ib_event->private_data, offset); 1903 } 1904 if (!conn_id) { 1905 ret = -ENOMEM; 1906 goto err1; 1907 } 1908 1909 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1910 ret = cma_acquire_dev(conn_id, listen_id); 1911 if (ret) 1912 goto err2; 1913 1914 conn_id->cm_id.ib = cm_id; 1915 cm_id->context = conn_id; 1916 cm_id->cm_handler = cma_ib_handler; 1917 1918 /* 1919 * Protect against the user destroying conn_id from another thread 1920 * until we're done accessing it. 1921 */ 1922 atomic_inc(&conn_id->refcount); 1923 ret = conn_id->id.event_handler(&conn_id->id, &event); 1924 if (ret) 1925 goto err3; 1926 /* 1927 * Acquire mutex to prevent user executing rdma_destroy_id() 1928 * while we're accessing the cm_id. 1929 */ 1930 mutex_lock(&lock); 1931 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 1932 (conn_id->id.qp_type != IB_QPT_UD)) 1933 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1934 mutex_unlock(&lock); 1935 mutex_unlock(&conn_id->handler_mutex); 1936 mutex_unlock(&listen_id->handler_mutex); 1937 cma_deref_id(conn_id); 1938 if (net_dev) 1939 dev_put(net_dev); 1940 return 0; 1941 1942 err3: 1943 cma_deref_id(conn_id); 1944 /* Destroy the CM ID by returning a non-zero value. */ 1945 conn_id->cm_id.ib = NULL; 1946 err2: 1947 cma_exch(conn_id, RDMA_CM_DESTROYING); 1948 mutex_unlock(&conn_id->handler_mutex); 1949 err1: 1950 mutex_unlock(&listen_id->handler_mutex); 1951 if (conn_id) 1952 rdma_destroy_id(&conn_id->id); 1953 1954 net_dev_put: 1955 if (net_dev) 1956 dev_put(net_dev); 1957 1958 return ret; 1959 } 1960 1961 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 1962 { 1963 if (addr->sa_family == AF_IB) 1964 return ((struct sockaddr_ib *) addr)->sib_sid; 1965 1966 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 1967 } 1968 EXPORT_SYMBOL(rdma_get_service_id); 1969 1970 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1971 { 1972 struct rdma_id_private *id_priv = iw_id->context; 1973 struct rdma_cm_event event; 1974 int ret = 0; 1975 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 1976 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 1977 1978 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 1979 return 0; 1980 1981 memset(&event, 0, sizeof event); 1982 switch (iw_event->event) { 1983 case IW_CM_EVENT_CLOSE: 1984 event.event = RDMA_CM_EVENT_DISCONNECTED; 1985 break; 1986 case IW_CM_EVENT_CONNECT_REPLY: 1987 memcpy(cma_src_addr(id_priv), laddr, 1988 rdma_addr_size(laddr)); 1989 memcpy(cma_dst_addr(id_priv), raddr, 1990 rdma_addr_size(raddr)); 1991 switch (iw_event->status) { 1992 case 0: 1993 event.event = RDMA_CM_EVENT_ESTABLISHED; 1994 event.param.conn.initiator_depth = iw_event->ird; 1995 event.param.conn.responder_resources = iw_event->ord; 1996 break; 1997 case -ECONNRESET: 1998 case -ECONNREFUSED: 1999 event.event = RDMA_CM_EVENT_REJECTED; 2000 break; 2001 case -ETIMEDOUT: 2002 event.event = RDMA_CM_EVENT_UNREACHABLE; 2003 break; 2004 default: 2005 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2006 break; 2007 } 2008 break; 2009 case IW_CM_EVENT_ESTABLISHED: 2010 event.event = RDMA_CM_EVENT_ESTABLISHED; 2011 event.param.conn.initiator_depth = iw_event->ird; 2012 event.param.conn.responder_resources = iw_event->ord; 2013 break; 2014 default: 2015 BUG_ON(1); 2016 } 2017 2018 event.status = iw_event->status; 2019 event.param.conn.private_data = iw_event->private_data; 2020 event.param.conn.private_data_len = iw_event->private_data_len; 2021 ret = id_priv->id.event_handler(&id_priv->id, &event); 2022 if (ret) { 2023 /* Destroy the CM ID by returning a non-zero value. */ 2024 id_priv->cm_id.iw = NULL; 2025 cma_exch(id_priv, RDMA_CM_DESTROYING); 2026 mutex_unlock(&id_priv->handler_mutex); 2027 rdma_destroy_id(&id_priv->id); 2028 return ret; 2029 } 2030 2031 mutex_unlock(&id_priv->handler_mutex); 2032 return ret; 2033 } 2034 2035 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2036 struct iw_cm_event *iw_event) 2037 { 2038 struct rdma_cm_id *new_cm_id; 2039 struct rdma_id_private *listen_id, *conn_id; 2040 struct rdma_cm_event event; 2041 int ret; 2042 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2043 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2044 2045 listen_id = cm_id->context; 2046 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) 2047 return -ECONNABORTED; 2048 2049 /* Create a new RDMA id for the new IW CM ID */ 2050 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2051 listen_id->id.event_handler, 2052 listen_id->id.context, 2053 RDMA_PS_TCP, IB_QPT_RC); 2054 if (IS_ERR(new_cm_id)) { 2055 ret = -ENOMEM; 2056 goto out; 2057 } 2058 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2059 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2060 conn_id->state = RDMA_CM_CONNECT; 2061 2062 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL); 2063 if (ret) { 2064 mutex_unlock(&conn_id->handler_mutex); 2065 rdma_destroy_id(new_cm_id); 2066 goto out; 2067 } 2068 2069 ret = cma_acquire_dev(conn_id, listen_id); 2070 if (ret) { 2071 mutex_unlock(&conn_id->handler_mutex); 2072 rdma_destroy_id(new_cm_id); 2073 goto out; 2074 } 2075 2076 conn_id->cm_id.iw = cm_id; 2077 cm_id->context = conn_id; 2078 cm_id->cm_handler = cma_iw_handler; 2079 2080 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2081 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2082 2083 memset(&event, 0, sizeof event); 2084 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2085 event.param.conn.private_data = iw_event->private_data; 2086 event.param.conn.private_data_len = iw_event->private_data_len; 2087 event.param.conn.initiator_depth = iw_event->ird; 2088 event.param.conn.responder_resources = iw_event->ord; 2089 2090 /* 2091 * Protect against the user destroying conn_id from another thread 2092 * until we're done accessing it. 2093 */ 2094 atomic_inc(&conn_id->refcount); 2095 ret = conn_id->id.event_handler(&conn_id->id, &event); 2096 if (ret) { 2097 /* User wants to destroy the CM ID */ 2098 conn_id->cm_id.iw = NULL; 2099 cma_exch(conn_id, RDMA_CM_DESTROYING); 2100 mutex_unlock(&conn_id->handler_mutex); 2101 cma_deref_id(conn_id); 2102 rdma_destroy_id(&conn_id->id); 2103 goto out; 2104 } 2105 2106 mutex_unlock(&conn_id->handler_mutex); 2107 cma_deref_id(conn_id); 2108 2109 out: 2110 mutex_unlock(&listen_id->handler_mutex); 2111 return ret; 2112 } 2113 2114 static int cma_ib_listen(struct rdma_id_private *id_priv) 2115 { 2116 struct sockaddr *addr; 2117 struct ib_cm_id *id; 2118 __be64 svc_id; 2119 2120 addr = cma_src_addr(id_priv); 2121 svc_id = rdma_get_service_id(&id_priv->id, addr); 2122 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2123 if (IS_ERR(id)) 2124 return PTR_ERR(id); 2125 id_priv->cm_id.ib = id; 2126 2127 return 0; 2128 } 2129 2130 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2131 { 2132 int ret; 2133 struct iw_cm_id *id; 2134 2135 id = iw_create_cm_id(id_priv->id.device, 2136 iw_conn_req_handler, 2137 id_priv); 2138 if (IS_ERR(id)) 2139 return PTR_ERR(id); 2140 2141 id->tos = id_priv->tos; 2142 id_priv->cm_id.iw = id; 2143 2144 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2145 rdma_addr_size(cma_src_addr(id_priv))); 2146 2147 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2148 2149 if (ret) { 2150 iw_destroy_cm_id(id_priv->cm_id.iw); 2151 id_priv->cm_id.iw = NULL; 2152 } 2153 2154 return ret; 2155 } 2156 2157 static int cma_listen_handler(struct rdma_cm_id *id, 2158 struct rdma_cm_event *event) 2159 { 2160 struct rdma_id_private *id_priv = id->context; 2161 2162 id->context = id_priv->id.context; 2163 id->event_handler = id_priv->id.event_handler; 2164 return id_priv->id.event_handler(id, event); 2165 } 2166 2167 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2168 struct cma_device *cma_dev) 2169 { 2170 struct rdma_id_private *dev_id_priv; 2171 struct rdma_cm_id *id; 2172 struct net *net = id_priv->id.route.addr.dev_addr.net; 2173 int ret; 2174 2175 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2176 return; 2177 2178 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2179 id_priv->id.qp_type); 2180 if (IS_ERR(id)) 2181 return; 2182 2183 dev_id_priv = container_of(id, struct rdma_id_private, id); 2184 2185 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2186 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2187 rdma_addr_size(cma_src_addr(id_priv))); 2188 2189 _cma_attach_to_dev(dev_id_priv, cma_dev); 2190 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2191 atomic_inc(&id_priv->refcount); 2192 dev_id_priv->internal_id = 1; 2193 dev_id_priv->afonly = id_priv->afonly; 2194 2195 ret = rdma_listen(id, id_priv->backlog); 2196 if (ret) 2197 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2198 ret, cma_dev->device->name); 2199 } 2200 2201 static void cma_listen_on_all(struct rdma_id_private *id_priv) 2202 { 2203 struct cma_device *cma_dev; 2204 2205 mutex_lock(&lock); 2206 list_add_tail(&id_priv->list, &listen_any_list); 2207 list_for_each_entry(cma_dev, &dev_list, list) 2208 cma_listen_on_dev(id_priv, cma_dev); 2209 mutex_unlock(&lock); 2210 } 2211 2212 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2213 { 2214 struct rdma_id_private *id_priv; 2215 2216 id_priv = container_of(id, struct rdma_id_private, id); 2217 id_priv->tos = (u8) tos; 2218 } 2219 EXPORT_SYMBOL(rdma_set_service_type); 2220 2221 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 2222 void *context) 2223 { 2224 struct cma_work *work = context; 2225 struct rdma_route *route; 2226 2227 route = &work->id->id.route; 2228 2229 if (!status) { 2230 route->num_paths = 1; 2231 *route->path_rec = *path_rec; 2232 } else { 2233 work->old_state = RDMA_CM_ROUTE_QUERY; 2234 work->new_state = RDMA_CM_ADDR_RESOLVED; 2235 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2236 work->event.status = status; 2237 } 2238 2239 queue_work(cma_wq, &work->work); 2240 } 2241 2242 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2243 struct cma_work *work) 2244 { 2245 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2246 struct ib_sa_path_rec path_rec; 2247 ib_sa_comp_mask comp_mask; 2248 struct sockaddr_in6 *sin6; 2249 struct sockaddr_ib *sib; 2250 2251 memset(&path_rec, 0, sizeof path_rec); 2252 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2253 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2254 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2255 path_rec.numb_path = 1; 2256 path_rec.reversible = 1; 2257 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 2258 2259 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2260 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2261 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2262 2263 switch (cma_family(id_priv)) { 2264 case AF_INET: 2265 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2266 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2267 break; 2268 case AF_INET6: 2269 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2270 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2271 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2272 break; 2273 case AF_IB: 2274 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2275 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2276 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2277 break; 2278 } 2279 2280 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2281 id_priv->id.port_num, &path_rec, 2282 comp_mask, timeout_ms, 2283 GFP_KERNEL, cma_query_handler, 2284 work, &id_priv->query); 2285 2286 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 2287 } 2288 2289 static void cma_work_handler(struct work_struct *_work) 2290 { 2291 struct cma_work *work = container_of(_work, struct cma_work, work); 2292 struct rdma_id_private *id_priv = work->id; 2293 int destroy = 0; 2294 2295 mutex_lock(&id_priv->handler_mutex); 2296 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2297 goto out; 2298 2299 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2300 cma_exch(id_priv, RDMA_CM_DESTROYING); 2301 destroy = 1; 2302 } 2303 out: 2304 mutex_unlock(&id_priv->handler_mutex); 2305 cma_deref_id(id_priv); 2306 if (destroy) 2307 rdma_destroy_id(&id_priv->id); 2308 kfree(work); 2309 } 2310 2311 static void cma_ndev_work_handler(struct work_struct *_work) 2312 { 2313 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 2314 struct rdma_id_private *id_priv = work->id; 2315 int destroy = 0; 2316 2317 mutex_lock(&id_priv->handler_mutex); 2318 if (id_priv->state == RDMA_CM_DESTROYING || 2319 id_priv->state == RDMA_CM_DEVICE_REMOVAL) 2320 goto out; 2321 2322 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2323 cma_exch(id_priv, RDMA_CM_DESTROYING); 2324 destroy = 1; 2325 } 2326 2327 out: 2328 mutex_unlock(&id_priv->handler_mutex); 2329 cma_deref_id(id_priv); 2330 if (destroy) 2331 rdma_destroy_id(&id_priv->id); 2332 kfree(work); 2333 } 2334 2335 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2336 { 2337 struct rdma_route *route = &id_priv->id.route; 2338 struct cma_work *work; 2339 int ret; 2340 2341 work = kzalloc(sizeof *work, GFP_KERNEL); 2342 if (!work) 2343 return -ENOMEM; 2344 2345 work->id = id_priv; 2346 INIT_WORK(&work->work, cma_work_handler); 2347 work->old_state = RDMA_CM_ROUTE_QUERY; 2348 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2349 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2350 2351 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2352 if (!route->path_rec) { 2353 ret = -ENOMEM; 2354 goto err1; 2355 } 2356 2357 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2358 if (ret) 2359 goto err2; 2360 2361 return 0; 2362 err2: 2363 kfree(route->path_rec); 2364 route->path_rec = NULL; 2365 err1: 2366 kfree(work); 2367 return ret; 2368 } 2369 2370 int rdma_set_ib_paths(struct rdma_cm_id *id, 2371 struct ib_sa_path_rec *path_rec, int num_paths) 2372 { 2373 struct rdma_id_private *id_priv; 2374 int ret; 2375 2376 id_priv = container_of(id, struct rdma_id_private, id); 2377 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2378 RDMA_CM_ROUTE_RESOLVED)) 2379 return -EINVAL; 2380 2381 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2382 GFP_KERNEL); 2383 if (!id->route.path_rec) { 2384 ret = -ENOMEM; 2385 goto err; 2386 } 2387 2388 id->route.num_paths = num_paths; 2389 return 0; 2390 err: 2391 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2392 return ret; 2393 } 2394 EXPORT_SYMBOL(rdma_set_ib_paths); 2395 2396 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2397 { 2398 struct cma_work *work; 2399 2400 work = kzalloc(sizeof *work, GFP_KERNEL); 2401 if (!work) 2402 return -ENOMEM; 2403 2404 work->id = id_priv; 2405 INIT_WORK(&work->work, cma_work_handler); 2406 work->old_state = RDMA_CM_ROUTE_QUERY; 2407 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2408 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2409 queue_work(cma_wq, &work->work); 2410 return 0; 2411 } 2412 2413 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 2414 { 2415 int prio; 2416 struct net_device *dev; 2417 2418 prio = rt_tos2priority(tos); 2419 dev = ndev->priv_flags & IFF_802_1Q_VLAN ? 2420 vlan_dev_real_dev(ndev) : ndev; 2421 2422 if (dev->num_tc) 2423 return netdev_get_prio_tc_map(dev, prio); 2424 2425 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2426 if (ndev->priv_flags & IFF_802_1Q_VLAN) 2427 return (vlan_dev_get_egress_qos_mask(ndev, prio) & 2428 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 2429 #endif 2430 return 0; 2431 } 2432 2433 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2434 { 2435 struct rdma_route *route = &id_priv->id.route; 2436 struct rdma_addr *addr = &route->addr; 2437 struct cma_work *work; 2438 int ret; 2439 struct net_device *ndev = NULL; 2440 2441 2442 work = kzalloc(sizeof *work, GFP_KERNEL); 2443 if (!work) 2444 return -ENOMEM; 2445 2446 work->id = id_priv; 2447 INIT_WORK(&work->work, cma_work_handler); 2448 2449 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2450 if (!route->path_rec) { 2451 ret = -ENOMEM; 2452 goto err1; 2453 } 2454 2455 route->num_paths = 1; 2456 2457 if (addr->dev_addr.bound_dev_if) { 2458 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2459 if (!ndev) 2460 return -ENODEV; 2461 2462 if (ndev->flags & IFF_LOOPBACK) { 2463 dev_put(ndev); 2464 if (!id_priv->id.device->get_netdev) 2465 return -EOPNOTSUPP; 2466 2467 ndev = id_priv->id.device->get_netdev(id_priv->id.device, 2468 id_priv->id.port_num); 2469 if (!ndev) 2470 return -ENODEV; 2471 } 2472 2473 route->path_rec->net = &init_net; 2474 route->path_rec->ifindex = ndev->ifindex; 2475 route->path_rec->gid_type = id_priv->gid_type; 2476 } 2477 if (!ndev) { 2478 ret = -ENODEV; 2479 goto err2; 2480 } 2481 2482 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2483 2484 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2485 &route->path_rec->sgid); 2486 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2487 &route->path_rec->dgid); 2488 2489 /* Use the hint from IP Stack to select GID Type */ 2490 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2491 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2492 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2493 /* TODO: get the hoplimit from the inet/inet6 device */ 2494 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2495 else 2496 route->path_rec->hop_limit = 1; 2497 route->path_rec->reversible = 1; 2498 route->path_rec->pkey = cpu_to_be16(0xffff); 2499 route->path_rec->mtu_selector = IB_SA_EQ; 2500 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2501 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 2502 route->path_rec->rate_selector = IB_SA_EQ; 2503 route->path_rec->rate = iboe_get_rate(ndev); 2504 dev_put(ndev); 2505 route->path_rec->packet_life_time_selector = IB_SA_EQ; 2506 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2507 if (!route->path_rec->mtu) { 2508 ret = -EINVAL; 2509 goto err2; 2510 } 2511 2512 work->old_state = RDMA_CM_ROUTE_QUERY; 2513 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2514 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2515 work->event.status = 0; 2516 2517 queue_work(cma_wq, &work->work); 2518 2519 return 0; 2520 2521 err2: 2522 kfree(route->path_rec); 2523 route->path_rec = NULL; 2524 err1: 2525 kfree(work); 2526 return ret; 2527 } 2528 2529 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2530 { 2531 struct rdma_id_private *id_priv; 2532 int ret; 2533 2534 id_priv = container_of(id, struct rdma_id_private, id); 2535 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2536 return -EINVAL; 2537 2538 atomic_inc(&id_priv->refcount); 2539 if (rdma_cap_ib_sa(id->device, id->port_num)) 2540 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2541 else if (rdma_protocol_roce(id->device, id->port_num)) 2542 ret = cma_resolve_iboe_route(id_priv); 2543 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2544 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2545 else 2546 ret = -ENOSYS; 2547 2548 if (ret) 2549 goto err; 2550 2551 return 0; 2552 err: 2553 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2554 cma_deref_id(id_priv); 2555 return ret; 2556 } 2557 EXPORT_SYMBOL(rdma_resolve_route); 2558 2559 static void cma_set_loopback(struct sockaddr *addr) 2560 { 2561 switch (addr->sa_family) { 2562 case AF_INET: 2563 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2564 break; 2565 case AF_INET6: 2566 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2567 0, 0, 0, htonl(1)); 2568 break; 2569 default: 2570 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2571 0, 0, 0, htonl(1)); 2572 break; 2573 } 2574 } 2575 2576 static int cma_bind_loopback(struct rdma_id_private *id_priv) 2577 { 2578 struct cma_device *cma_dev, *cur_dev; 2579 struct ib_port_attr port_attr; 2580 union ib_gid gid; 2581 u16 pkey; 2582 int ret; 2583 u8 p; 2584 2585 cma_dev = NULL; 2586 mutex_lock(&lock); 2587 list_for_each_entry(cur_dev, &dev_list, list) { 2588 if (cma_family(id_priv) == AF_IB && 2589 !rdma_cap_ib_cm(cur_dev->device, 1)) 2590 continue; 2591 2592 if (!cma_dev) 2593 cma_dev = cur_dev; 2594 2595 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2596 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2597 port_attr.state == IB_PORT_ACTIVE) { 2598 cma_dev = cur_dev; 2599 goto port_found; 2600 } 2601 } 2602 } 2603 2604 if (!cma_dev) { 2605 ret = -ENODEV; 2606 goto out; 2607 } 2608 2609 p = 1; 2610 2611 port_found: 2612 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2613 if (ret) 2614 goto out; 2615 2616 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2617 if (ret) 2618 goto out; 2619 2620 id_priv->id.route.addr.dev_addr.dev_type = 2621 (rdma_protocol_ib(cma_dev->device, p)) ? 2622 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2623 2624 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2625 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2626 id_priv->id.port_num = p; 2627 cma_attach_to_dev(id_priv, cma_dev); 2628 cma_set_loopback(cma_src_addr(id_priv)); 2629 out: 2630 mutex_unlock(&lock); 2631 return ret; 2632 } 2633 2634 static void addr_handler(int status, struct sockaddr *src_addr, 2635 struct rdma_dev_addr *dev_addr, void *context) 2636 { 2637 struct rdma_id_private *id_priv = context; 2638 struct rdma_cm_event event; 2639 2640 memset(&event, 0, sizeof event); 2641 mutex_lock(&id_priv->handler_mutex); 2642 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2643 RDMA_CM_ADDR_RESOLVED)) 2644 goto out; 2645 2646 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2647 if (!status && !id_priv->cma_dev) 2648 status = cma_acquire_dev(id_priv, NULL); 2649 2650 if (status) { 2651 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2652 RDMA_CM_ADDR_BOUND)) 2653 goto out; 2654 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2655 event.status = status; 2656 } else 2657 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2658 2659 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2660 cma_exch(id_priv, RDMA_CM_DESTROYING); 2661 mutex_unlock(&id_priv->handler_mutex); 2662 cma_deref_id(id_priv); 2663 rdma_destroy_id(&id_priv->id); 2664 return; 2665 } 2666 out: 2667 mutex_unlock(&id_priv->handler_mutex); 2668 cma_deref_id(id_priv); 2669 } 2670 2671 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2672 { 2673 struct cma_work *work; 2674 union ib_gid gid; 2675 int ret; 2676 2677 work = kzalloc(sizeof *work, GFP_KERNEL); 2678 if (!work) 2679 return -ENOMEM; 2680 2681 if (!id_priv->cma_dev) { 2682 ret = cma_bind_loopback(id_priv); 2683 if (ret) 2684 goto err; 2685 } 2686 2687 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2688 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2689 2690 work->id = id_priv; 2691 INIT_WORK(&work->work, cma_work_handler); 2692 work->old_state = RDMA_CM_ADDR_QUERY; 2693 work->new_state = RDMA_CM_ADDR_RESOLVED; 2694 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2695 queue_work(cma_wq, &work->work); 2696 return 0; 2697 err: 2698 kfree(work); 2699 return ret; 2700 } 2701 2702 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 2703 { 2704 struct cma_work *work; 2705 int ret; 2706 2707 work = kzalloc(sizeof *work, GFP_KERNEL); 2708 if (!work) 2709 return -ENOMEM; 2710 2711 if (!id_priv->cma_dev) { 2712 ret = cma_resolve_ib_dev(id_priv); 2713 if (ret) 2714 goto err; 2715 } 2716 2717 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 2718 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 2719 2720 work->id = id_priv; 2721 INIT_WORK(&work->work, cma_work_handler); 2722 work->old_state = RDMA_CM_ADDR_QUERY; 2723 work->new_state = RDMA_CM_ADDR_RESOLVED; 2724 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2725 queue_work(cma_wq, &work->work); 2726 return 0; 2727 err: 2728 kfree(work); 2729 return ret; 2730 } 2731 2732 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2733 struct sockaddr *dst_addr) 2734 { 2735 if (!src_addr || !src_addr->sa_family) { 2736 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2737 src_addr->sa_family = dst_addr->sa_family; 2738 if (dst_addr->sa_family == AF_INET6) { 2739 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2740 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2741 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2742 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 2743 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 2744 } else if (dst_addr->sa_family == AF_IB) { 2745 ((struct sockaddr_ib *) src_addr)->sib_pkey = 2746 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 2747 } 2748 } 2749 return rdma_bind_addr(id, src_addr); 2750 } 2751 2752 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2753 struct sockaddr *dst_addr, int timeout_ms) 2754 { 2755 struct rdma_id_private *id_priv; 2756 int ret; 2757 2758 id_priv = container_of(id, struct rdma_id_private, id); 2759 if (id_priv->state == RDMA_CM_IDLE) { 2760 ret = cma_bind_addr(id, src_addr, dst_addr); 2761 if (ret) 2762 return ret; 2763 } 2764 2765 if (cma_family(id_priv) != dst_addr->sa_family) 2766 return -EINVAL; 2767 2768 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2769 return -EINVAL; 2770 2771 atomic_inc(&id_priv->refcount); 2772 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 2773 if (cma_any_addr(dst_addr)) { 2774 ret = cma_resolve_loopback(id_priv); 2775 } else { 2776 if (dst_addr->sa_family == AF_IB) { 2777 ret = cma_resolve_ib_addr(id_priv); 2778 } else { 2779 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 2780 dst_addr, &id->route.addr.dev_addr, 2781 timeout_ms, addr_handler, id_priv); 2782 } 2783 } 2784 if (ret) 2785 goto err; 2786 2787 return 0; 2788 err: 2789 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2790 cma_deref_id(id_priv); 2791 return ret; 2792 } 2793 EXPORT_SYMBOL(rdma_resolve_addr); 2794 2795 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2796 { 2797 struct rdma_id_private *id_priv; 2798 unsigned long flags; 2799 int ret; 2800 2801 id_priv = container_of(id, struct rdma_id_private, id); 2802 spin_lock_irqsave(&id_priv->lock, flags); 2803 if (reuse || id_priv->state == RDMA_CM_IDLE) { 2804 id_priv->reuseaddr = reuse; 2805 ret = 0; 2806 } else { 2807 ret = -EINVAL; 2808 } 2809 spin_unlock_irqrestore(&id_priv->lock, flags); 2810 return ret; 2811 } 2812 EXPORT_SYMBOL(rdma_set_reuseaddr); 2813 2814 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2815 { 2816 struct rdma_id_private *id_priv; 2817 unsigned long flags; 2818 int ret; 2819 2820 id_priv = container_of(id, struct rdma_id_private, id); 2821 spin_lock_irqsave(&id_priv->lock, flags); 2822 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2823 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2824 id_priv->afonly = afonly; 2825 ret = 0; 2826 } else { 2827 ret = -EINVAL; 2828 } 2829 spin_unlock_irqrestore(&id_priv->lock, flags); 2830 return ret; 2831 } 2832 EXPORT_SYMBOL(rdma_set_afonly); 2833 2834 static void cma_bind_port(struct rdma_bind_list *bind_list, 2835 struct rdma_id_private *id_priv) 2836 { 2837 struct sockaddr *addr; 2838 struct sockaddr_ib *sib; 2839 u64 sid, mask; 2840 __be16 port; 2841 2842 addr = cma_src_addr(id_priv); 2843 port = htons(bind_list->port); 2844 2845 switch (addr->sa_family) { 2846 case AF_INET: 2847 ((struct sockaddr_in *) addr)->sin_port = port; 2848 break; 2849 case AF_INET6: 2850 ((struct sockaddr_in6 *) addr)->sin6_port = port; 2851 break; 2852 case AF_IB: 2853 sib = (struct sockaddr_ib *) addr; 2854 sid = be64_to_cpu(sib->sib_sid); 2855 mask = be64_to_cpu(sib->sib_sid_mask); 2856 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 2857 sib->sib_sid_mask = cpu_to_be64(~0ULL); 2858 break; 2859 } 2860 id_priv->bind_list = bind_list; 2861 hlist_add_head(&id_priv->node, &bind_list->owners); 2862 } 2863 2864 static int cma_alloc_port(enum rdma_port_space ps, 2865 struct rdma_id_private *id_priv, unsigned short snum) 2866 { 2867 struct rdma_bind_list *bind_list; 2868 int ret; 2869 2870 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2871 if (!bind_list) 2872 return -ENOMEM; 2873 2874 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 2875 snum); 2876 if (ret < 0) 2877 goto err; 2878 2879 bind_list->ps = ps; 2880 bind_list->port = (unsigned short)ret; 2881 cma_bind_port(bind_list, id_priv); 2882 return 0; 2883 err: 2884 kfree(bind_list); 2885 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 2886 } 2887 2888 static int cma_alloc_any_port(enum rdma_port_space ps, 2889 struct rdma_id_private *id_priv) 2890 { 2891 static unsigned int last_used_port; 2892 int low, high, remaining; 2893 unsigned int rover; 2894 struct net *net = id_priv->id.route.addr.dev_addr.net; 2895 2896 inet_get_local_port_range(net, &low, &high); 2897 remaining = (high - low) + 1; 2898 rover = prandom_u32() % remaining + low; 2899 retry: 2900 if (last_used_port != rover && 2901 !cma_ps_find(net, ps, (unsigned short)rover)) { 2902 int ret = cma_alloc_port(ps, id_priv, rover); 2903 /* 2904 * Remember previously used port number in order to avoid 2905 * re-using same port immediately after it is closed. 2906 */ 2907 if (!ret) 2908 last_used_port = rover; 2909 if (ret != -EADDRNOTAVAIL) 2910 return ret; 2911 } 2912 if (--remaining) { 2913 rover++; 2914 if ((rover < low) || (rover > high)) 2915 rover = low; 2916 goto retry; 2917 } 2918 return -EADDRNOTAVAIL; 2919 } 2920 2921 /* 2922 * Check that the requested port is available. This is called when trying to 2923 * bind to a specific port, or when trying to listen on a bound port. In 2924 * the latter case, the provided id_priv may already be on the bind_list, but 2925 * we still need to check that it's okay to start listening. 2926 */ 2927 static int cma_check_port(struct rdma_bind_list *bind_list, 2928 struct rdma_id_private *id_priv, uint8_t reuseaddr) 2929 { 2930 struct rdma_id_private *cur_id; 2931 struct sockaddr *addr, *cur_addr; 2932 2933 addr = cma_src_addr(id_priv); 2934 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 2935 if (id_priv == cur_id) 2936 continue; 2937 2938 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 2939 cur_id->reuseaddr) 2940 continue; 2941 2942 cur_addr = cma_src_addr(cur_id); 2943 if (id_priv->afonly && cur_id->afonly && 2944 (addr->sa_family != cur_addr->sa_family)) 2945 continue; 2946 2947 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 2948 return -EADDRNOTAVAIL; 2949 2950 if (!cma_addr_cmp(addr, cur_addr)) 2951 return -EADDRINUSE; 2952 } 2953 return 0; 2954 } 2955 2956 static int cma_use_port(enum rdma_port_space ps, 2957 struct rdma_id_private *id_priv) 2958 { 2959 struct rdma_bind_list *bind_list; 2960 unsigned short snum; 2961 int ret; 2962 2963 snum = ntohs(cma_port(cma_src_addr(id_priv))); 2964 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2965 return -EACCES; 2966 2967 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 2968 if (!bind_list) { 2969 ret = cma_alloc_port(ps, id_priv, snum); 2970 } else { 2971 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 2972 if (!ret) 2973 cma_bind_port(bind_list, id_priv); 2974 } 2975 return ret; 2976 } 2977 2978 static int cma_bind_listen(struct rdma_id_private *id_priv) 2979 { 2980 struct rdma_bind_list *bind_list = id_priv->bind_list; 2981 int ret = 0; 2982 2983 mutex_lock(&lock); 2984 if (bind_list->owners.first->next) 2985 ret = cma_check_port(bind_list, id_priv, 0); 2986 mutex_unlock(&lock); 2987 return ret; 2988 } 2989 2990 static enum rdma_port_space cma_select_inet_ps( 2991 struct rdma_id_private *id_priv) 2992 { 2993 switch (id_priv->id.ps) { 2994 case RDMA_PS_TCP: 2995 case RDMA_PS_UDP: 2996 case RDMA_PS_IPOIB: 2997 case RDMA_PS_IB: 2998 return id_priv->id.ps; 2999 default: 3000 3001 return 0; 3002 } 3003 } 3004 3005 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3006 { 3007 enum rdma_port_space ps = 0; 3008 struct sockaddr_ib *sib; 3009 u64 sid_ps, mask, sid; 3010 3011 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3012 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3013 sid = be64_to_cpu(sib->sib_sid) & mask; 3014 3015 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3016 sid_ps = RDMA_IB_IP_PS_IB; 3017 ps = RDMA_PS_IB; 3018 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3019 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3020 sid_ps = RDMA_IB_IP_PS_TCP; 3021 ps = RDMA_PS_TCP; 3022 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3023 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3024 sid_ps = RDMA_IB_IP_PS_UDP; 3025 ps = RDMA_PS_UDP; 3026 } 3027 3028 if (ps) { 3029 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3030 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3031 be64_to_cpu(sib->sib_sid_mask)); 3032 } 3033 return ps; 3034 } 3035 3036 static int cma_get_port(struct rdma_id_private *id_priv) 3037 { 3038 enum rdma_port_space ps; 3039 int ret; 3040 3041 if (cma_family(id_priv) != AF_IB) 3042 ps = cma_select_inet_ps(id_priv); 3043 else 3044 ps = cma_select_ib_ps(id_priv); 3045 if (!ps) 3046 return -EPROTONOSUPPORT; 3047 3048 mutex_lock(&lock); 3049 if (cma_any_port(cma_src_addr(id_priv))) 3050 ret = cma_alloc_any_port(ps, id_priv); 3051 else 3052 ret = cma_use_port(ps, id_priv); 3053 mutex_unlock(&lock); 3054 3055 return ret; 3056 } 3057 3058 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3059 struct sockaddr *addr) 3060 { 3061 #if IS_ENABLED(CONFIG_IPV6) 3062 struct sockaddr_in6 *sin6; 3063 3064 if (addr->sa_family != AF_INET6) 3065 return 0; 3066 3067 sin6 = (struct sockaddr_in6 *) addr; 3068 3069 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3070 return 0; 3071 3072 if (!sin6->sin6_scope_id) 3073 return -EINVAL; 3074 3075 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3076 #endif 3077 return 0; 3078 } 3079 3080 int rdma_listen(struct rdma_cm_id *id, int backlog) 3081 { 3082 struct rdma_id_private *id_priv; 3083 int ret; 3084 3085 id_priv = container_of(id, struct rdma_id_private, id); 3086 if (id_priv->state == RDMA_CM_IDLE) { 3087 id->route.addr.src_addr.ss_family = AF_INET; 3088 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3089 if (ret) 3090 return ret; 3091 } 3092 3093 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3094 return -EINVAL; 3095 3096 if (id_priv->reuseaddr) { 3097 ret = cma_bind_listen(id_priv); 3098 if (ret) 3099 goto err; 3100 } 3101 3102 id_priv->backlog = backlog; 3103 if (id->device) { 3104 if (rdma_cap_ib_cm(id->device, 1)) { 3105 ret = cma_ib_listen(id_priv); 3106 if (ret) 3107 goto err; 3108 } else if (rdma_cap_iw_cm(id->device, 1)) { 3109 ret = cma_iw_listen(id_priv, backlog); 3110 if (ret) 3111 goto err; 3112 } else { 3113 ret = -ENOSYS; 3114 goto err; 3115 } 3116 } else 3117 cma_listen_on_all(id_priv); 3118 3119 return 0; 3120 err: 3121 id_priv->backlog = 0; 3122 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3123 return ret; 3124 } 3125 EXPORT_SYMBOL(rdma_listen); 3126 3127 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3128 { 3129 struct rdma_id_private *id_priv; 3130 int ret; 3131 3132 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3133 addr->sa_family != AF_IB) 3134 return -EAFNOSUPPORT; 3135 3136 id_priv = container_of(id, struct rdma_id_private, id); 3137 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3138 return -EINVAL; 3139 3140 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3141 if (ret) 3142 goto err1; 3143 3144 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3145 if (!cma_any_addr(addr)) { 3146 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3147 if (ret) 3148 goto err1; 3149 3150 ret = cma_acquire_dev(id_priv, NULL); 3151 if (ret) 3152 goto err1; 3153 } 3154 3155 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3156 if (addr->sa_family == AF_INET) 3157 id_priv->afonly = 1; 3158 #if IS_ENABLED(CONFIG_IPV6) 3159 else if (addr->sa_family == AF_INET6) { 3160 struct net *net = id_priv->id.route.addr.dev_addr.net; 3161 3162 id_priv->afonly = net->ipv6.sysctl.bindv6only; 3163 } 3164 #endif 3165 } 3166 ret = cma_get_port(id_priv); 3167 if (ret) 3168 goto err2; 3169 3170 return 0; 3171 err2: 3172 if (id_priv->cma_dev) 3173 cma_release_dev(id_priv); 3174 err1: 3175 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3176 return ret; 3177 } 3178 EXPORT_SYMBOL(rdma_bind_addr); 3179 3180 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3181 { 3182 struct cma_hdr *cma_hdr; 3183 3184 cma_hdr = hdr; 3185 cma_hdr->cma_version = CMA_VERSION; 3186 if (cma_family(id_priv) == AF_INET) { 3187 struct sockaddr_in *src4, *dst4; 3188 3189 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 3190 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3191 3192 cma_set_ip_ver(cma_hdr, 4); 3193 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3194 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3195 cma_hdr->port = src4->sin_port; 3196 } else if (cma_family(id_priv) == AF_INET6) { 3197 struct sockaddr_in6 *src6, *dst6; 3198 3199 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3200 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3201 3202 cma_set_ip_ver(cma_hdr, 6); 3203 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3204 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3205 cma_hdr->port = src6->sin6_port; 3206 } 3207 return 0; 3208 } 3209 3210 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3211 struct ib_cm_event *ib_event) 3212 { 3213 struct rdma_id_private *id_priv = cm_id->context; 3214 struct rdma_cm_event event; 3215 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3216 int ret = 0; 3217 3218 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 3219 return 0; 3220 3221 memset(&event, 0, sizeof event); 3222 switch (ib_event->event) { 3223 case IB_CM_SIDR_REQ_ERROR: 3224 event.event = RDMA_CM_EVENT_UNREACHABLE; 3225 event.status = -ETIMEDOUT; 3226 break; 3227 case IB_CM_SIDR_REP_RECEIVED: 3228 event.param.ud.private_data = ib_event->private_data; 3229 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3230 if (rep->status != IB_SIDR_SUCCESS) { 3231 event.event = RDMA_CM_EVENT_UNREACHABLE; 3232 event.status = ib_event->param.sidr_rep_rcvd.status; 3233 break; 3234 } 3235 ret = cma_set_qkey(id_priv, rep->qkey); 3236 if (ret) { 3237 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3238 event.status = ret; 3239 break; 3240 } 3241 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 3242 id_priv->id.route.path_rec, 3243 &event.param.ud.ah_attr); 3244 event.param.ud.qp_num = rep->qpn; 3245 event.param.ud.qkey = rep->qkey; 3246 event.event = RDMA_CM_EVENT_ESTABLISHED; 3247 event.status = 0; 3248 break; 3249 default: 3250 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3251 ib_event->event); 3252 goto out; 3253 } 3254 3255 ret = id_priv->id.event_handler(&id_priv->id, &event); 3256 if (ret) { 3257 /* Destroy the CM ID by returning a non-zero value. */ 3258 id_priv->cm_id.ib = NULL; 3259 cma_exch(id_priv, RDMA_CM_DESTROYING); 3260 mutex_unlock(&id_priv->handler_mutex); 3261 rdma_destroy_id(&id_priv->id); 3262 return ret; 3263 } 3264 out: 3265 mutex_unlock(&id_priv->handler_mutex); 3266 return ret; 3267 } 3268 3269 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3270 struct rdma_conn_param *conn_param) 3271 { 3272 struct ib_cm_sidr_req_param req; 3273 struct ib_cm_id *id; 3274 void *private_data; 3275 int offset, ret; 3276 3277 memset(&req, 0, sizeof req); 3278 offset = cma_user_data_offset(id_priv); 3279 req.private_data_len = offset + conn_param->private_data_len; 3280 if (req.private_data_len < conn_param->private_data_len) 3281 return -EINVAL; 3282 3283 if (req.private_data_len) { 3284 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3285 if (!private_data) 3286 return -ENOMEM; 3287 } else { 3288 private_data = NULL; 3289 } 3290 3291 if (conn_param->private_data && conn_param->private_data_len) 3292 memcpy(private_data + offset, conn_param->private_data, 3293 conn_param->private_data_len); 3294 3295 if (private_data) { 3296 ret = cma_format_hdr(private_data, id_priv); 3297 if (ret) 3298 goto out; 3299 req.private_data = private_data; 3300 } 3301 3302 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3303 id_priv); 3304 if (IS_ERR(id)) { 3305 ret = PTR_ERR(id); 3306 goto out; 3307 } 3308 id_priv->cm_id.ib = id; 3309 3310 req.path = id_priv->id.route.path_rec; 3311 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3312 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3313 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3314 3315 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3316 if (ret) { 3317 ib_destroy_cm_id(id_priv->cm_id.ib); 3318 id_priv->cm_id.ib = NULL; 3319 } 3320 out: 3321 kfree(private_data); 3322 return ret; 3323 } 3324 3325 static int cma_connect_ib(struct rdma_id_private *id_priv, 3326 struct rdma_conn_param *conn_param) 3327 { 3328 struct ib_cm_req_param req; 3329 struct rdma_route *route; 3330 void *private_data; 3331 struct ib_cm_id *id; 3332 int offset, ret; 3333 3334 memset(&req, 0, sizeof req); 3335 offset = cma_user_data_offset(id_priv); 3336 req.private_data_len = offset + conn_param->private_data_len; 3337 if (req.private_data_len < conn_param->private_data_len) 3338 return -EINVAL; 3339 3340 if (req.private_data_len) { 3341 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3342 if (!private_data) 3343 return -ENOMEM; 3344 } else { 3345 private_data = NULL; 3346 } 3347 3348 if (conn_param->private_data && conn_param->private_data_len) 3349 memcpy(private_data + offset, conn_param->private_data, 3350 conn_param->private_data_len); 3351 3352 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3353 if (IS_ERR(id)) { 3354 ret = PTR_ERR(id); 3355 goto out; 3356 } 3357 id_priv->cm_id.ib = id; 3358 3359 route = &id_priv->id.route; 3360 if (private_data) { 3361 ret = cma_format_hdr(private_data, id_priv); 3362 if (ret) 3363 goto out; 3364 req.private_data = private_data; 3365 } 3366 3367 req.primary_path = &route->path_rec[0]; 3368 if (route->num_paths == 2) 3369 req.alternate_path = &route->path_rec[1]; 3370 3371 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3372 req.qp_num = id_priv->qp_num; 3373 req.qp_type = id_priv->id.qp_type; 3374 req.starting_psn = id_priv->seq_num; 3375 req.responder_resources = conn_param->responder_resources; 3376 req.initiator_depth = conn_param->initiator_depth; 3377 req.flow_control = conn_param->flow_control; 3378 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3379 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3380 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3381 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3382 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3383 req.srq = id_priv->srq ? 1 : 0; 3384 3385 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3386 out: 3387 if (ret && !IS_ERR(id)) { 3388 ib_destroy_cm_id(id); 3389 id_priv->cm_id.ib = NULL; 3390 } 3391 3392 kfree(private_data); 3393 return ret; 3394 } 3395 3396 static int cma_connect_iw(struct rdma_id_private *id_priv, 3397 struct rdma_conn_param *conn_param) 3398 { 3399 struct iw_cm_id *cm_id; 3400 int ret; 3401 struct iw_cm_conn_param iw_param; 3402 3403 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3404 if (IS_ERR(cm_id)) 3405 return PTR_ERR(cm_id); 3406 3407 cm_id->tos = id_priv->tos; 3408 id_priv->cm_id.iw = cm_id; 3409 3410 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3411 rdma_addr_size(cma_src_addr(id_priv))); 3412 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3413 rdma_addr_size(cma_dst_addr(id_priv))); 3414 3415 ret = cma_modify_qp_rtr(id_priv, conn_param); 3416 if (ret) 3417 goto out; 3418 3419 if (conn_param) { 3420 iw_param.ord = conn_param->initiator_depth; 3421 iw_param.ird = conn_param->responder_resources; 3422 iw_param.private_data = conn_param->private_data; 3423 iw_param.private_data_len = conn_param->private_data_len; 3424 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 3425 } else { 3426 memset(&iw_param, 0, sizeof iw_param); 3427 iw_param.qpn = id_priv->qp_num; 3428 } 3429 ret = iw_cm_connect(cm_id, &iw_param); 3430 out: 3431 if (ret) { 3432 iw_destroy_cm_id(cm_id); 3433 id_priv->cm_id.iw = NULL; 3434 } 3435 return ret; 3436 } 3437 3438 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3439 { 3440 struct rdma_id_private *id_priv; 3441 int ret; 3442 3443 id_priv = container_of(id, struct rdma_id_private, id); 3444 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3445 return -EINVAL; 3446 3447 if (!id->qp) { 3448 id_priv->qp_num = conn_param->qp_num; 3449 id_priv->srq = conn_param->srq; 3450 } 3451 3452 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3453 if (id->qp_type == IB_QPT_UD) 3454 ret = cma_resolve_ib_udp(id_priv, conn_param); 3455 else 3456 ret = cma_connect_ib(id_priv, conn_param); 3457 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3458 ret = cma_connect_iw(id_priv, conn_param); 3459 else 3460 ret = -ENOSYS; 3461 if (ret) 3462 goto err; 3463 3464 return 0; 3465 err: 3466 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3467 return ret; 3468 } 3469 EXPORT_SYMBOL(rdma_connect); 3470 3471 static int cma_accept_ib(struct rdma_id_private *id_priv, 3472 struct rdma_conn_param *conn_param) 3473 { 3474 struct ib_cm_rep_param rep; 3475 int ret; 3476 3477 ret = cma_modify_qp_rtr(id_priv, conn_param); 3478 if (ret) 3479 goto out; 3480 3481 ret = cma_modify_qp_rts(id_priv, conn_param); 3482 if (ret) 3483 goto out; 3484 3485 memset(&rep, 0, sizeof rep); 3486 rep.qp_num = id_priv->qp_num; 3487 rep.starting_psn = id_priv->seq_num; 3488 rep.private_data = conn_param->private_data; 3489 rep.private_data_len = conn_param->private_data_len; 3490 rep.responder_resources = conn_param->responder_resources; 3491 rep.initiator_depth = conn_param->initiator_depth; 3492 rep.failover_accepted = 0; 3493 rep.flow_control = conn_param->flow_control; 3494 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3495 rep.srq = id_priv->srq ? 1 : 0; 3496 3497 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3498 out: 3499 return ret; 3500 } 3501 3502 static int cma_accept_iw(struct rdma_id_private *id_priv, 3503 struct rdma_conn_param *conn_param) 3504 { 3505 struct iw_cm_conn_param iw_param; 3506 int ret; 3507 3508 ret = cma_modify_qp_rtr(id_priv, conn_param); 3509 if (ret) 3510 return ret; 3511 3512 iw_param.ord = conn_param->initiator_depth; 3513 iw_param.ird = conn_param->responder_resources; 3514 iw_param.private_data = conn_param->private_data; 3515 iw_param.private_data_len = conn_param->private_data_len; 3516 if (id_priv->id.qp) { 3517 iw_param.qpn = id_priv->qp_num; 3518 } else 3519 iw_param.qpn = conn_param->qp_num; 3520 3521 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3522 } 3523 3524 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3525 enum ib_cm_sidr_status status, u32 qkey, 3526 const void *private_data, int private_data_len) 3527 { 3528 struct ib_cm_sidr_rep_param rep; 3529 int ret; 3530 3531 memset(&rep, 0, sizeof rep); 3532 rep.status = status; 3533 if (status == IB_SIDR_SUCCESS) { 3534 ret = cma_set_qkey(id_priv, qkey); 3535 if (ret) 3536 return ret; 3537 rep.qp_num = id_priv->qp_num; 3538 rep.qkey = id_priv->qkey; 3539 } 3540 rep.private_data = private_data; 3541 rep.private_data_len = private_data_len; 3542 3543 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3544 } 3545 3546 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3547 { 3548 struct rdma_id_private *id_priv; 3549 int ret; 3550 3551 id_priv = container_of(id, struct rdma_id_private, id); 3552 3553 id_priv->owner = task_pid_nr(current); 3554 3555 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3556 return -EINVAL; 3557 3558 if (!id->qp && conn_param) { 3559 id_priv->qp_num = conn_param->qp_num; 3560 id_priv->srq = conn_param->srq; 3561 } 3562 3563 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3564 if (id->qp_type == IB_QPT_UD) { 3565 if (conn_param) 3566 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3567 conn_param->qkey, 3568 conn_param->private_data, 3569 conn_param->private_data_len); 3570 else 3571 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3572 0, NULL, 0); 3573 } else { 3574 if (conn_param) 3575 ret = cma_accept_ib(id_priv, conn_param); 3576 else 3577 ret = cma_rep_recv(id_priv); 3578 } 3579 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3580 ret = cma_accept_iw(id_priv, conn_param); 3581 else 3582 ret = -ENOSYS; 3583 3584 if (ret) 3585 goto reject; 3586 3587 return 0; 3588 reject: 3589 cma_modify_qp_err(id_priv); 3590 rdma_reject(id, NULL, 0); 3591 return ret; 3592 } 3593 EXPORT_SYMBOL(rdma_accept); 3594 3595 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3596 { 3597 struct rdma_id_private *id_priv; 3598 int ret; 3599 3600 id_priv = container_of(id, struct rdma_id_private, id); 3601 if (!id_priv->cm_id.ib) 3602 return -EINVAL; 3603 3604 switch (id->device->node_type) { 3605 case RDMA_NODE_IB_CA: 3606 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3607 break; 3608 default: 3609 ret = 0; 3610 break; 3611 } 3612 return ret; 3613 } 3614 EXPORT_SYMBOL(rdma_notify); 3615 3616 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 3617 u8 private_data_len) 3618 { 3619 struct rdma_id_private *id_priv; 3620 int ret; 3621 3622 id_priv = container_of(id, struct rdma_id_private, id); 3623 if (!id_priv->cm_id.ib) 3624 return -EINVAL; 3625 3626 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3627 if (id->qp_type == IB_QPT_UD) 3628 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 3629 private_data, private_data_len); 3630 else 3631 ret = ib_send_cm_rej(id_priv->cm_id.ib, 3632 IB_CM_REJ_CONSUMER_DEFINED, NULL, 3633 0, private_data, private_data_len); 3634 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3635 ret = iw_cm_reject(id_priv->cm_id.iw, 3636 private_data, private_data_len); 3637 } else 3638 ret = -ENOSYS; 3639 3640 return ret; 3641 } 3642 EXPORT_SYMBOL(rdma_reject); 3643 3644 int rdma_disconnect(struct rdma_cm_id *id) 3645 { 3646 struct rdma_id_private *id_priv; 3647 int ret; 3648 3649 id_priv = container_of(id, struct rdma_id_private, id); 3650 if (!id_priv->cm_id.ib) 3651 return -EINVAL; 3652 3653 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3654 ret = cma_modify_qp_err(id_priv); 3655 if (ret) 3656 goto out; 3657 /* Initiate or respond to a disconnect. */ 3658 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 3659 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 3660 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3661 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 3662 } else 3663 ret = -EINVAL; 3664 3665 out: 3666 return ret; 3667 } 3668 EXPORT_SYMBOL(rdma_disconnect); 3669 3670 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 3671 { 3672 struct rdma_id_private *id_priv; 3673 struct cma_multicast *mc = multicast->context; 3674 struct rdma_cm_event event; 3675 int ret; 3676 3677 id_priv = mc->id_priv; 3678 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && 3679 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) 3680 return 0; 3681 3682 if (!status) 3683 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3684 mutex_lock(&id_priv->qp_mutex); 3685 if (!status && id_priv->id.qp) 3686 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 3687 be16_to_cpu(multicast->rec.mlid)); 3688 mutex_unlock(&id_priv->qp_mutex); 3689 3690 memset(&event, 0, sizeof event); 3691 event.status = status; 3692 event.param.ud.private_data = mc->context; 3693 if (!status) { 3694 struct rdma_dev_addr *dev_addr = 3695 &id_priv->id.route.addr.dev_addr; 3696 struct net_device *ndev = 3697 dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3698 enum ib_gid_type gid_type = 3699 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3700 rdma_start_port(id_priv->cma_dev->device)]; 3701 3702 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 3703 ib_init_ah_from_mcmember(id_priv->id.device, 3704 id_priv->id.port_num, &multicast->rec, 3705 ndev, gid_type, 3706 &event.param.ud.ah_attr); 3707 event.param.ud.qp_num = 0xFFFFFF; 3708 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3709 if (ndev) 3710 dev_put(ndev); 3711 } else 3712 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3713 3714 ret = id_priv->id.event_handler(&id_priv->id, &event); 3715 if (ret) { 3716 cma_exch(id_priv, RDMA_CM_DESTROYING); 3717 mutex_unlock(&id_priv->handler_mutex); 3718 rdma_destroy_id(&id_priv->id); 3719 return 0; 3720 } 3721 3722 mutex_unlock(&id_priv->handler_mutex); 3723 return 0; 3724 } 3725 3726 static void cma_set_mgid(struct rdma_id_private *id_priv, 3727 struct sockaddr *addr, union ib_gid *mgid) 3728 { 3729 unsigned char mc_map[MAX_ADDR_LEN]; 3730 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3731 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3732 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3733 3734 if (cma_any_addr(addr)) { 3735 memset(mgid, 0, sizeof *mgid); 3736 } else if ((addr->sa_family == AF_INET6) && 3737 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3738 0xFF10A01B)) { 3739 /* IPv6 address is an SA assigned MGID. */ 3740 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3741 } else if (addr->sa_family == AF_IB) { 3742 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 3743 } else if ((addr->sa_family == AF_INET6)) { 3744 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3745 if (id_priv->id.ps == RDMA_PS_UDP) 3746 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3747 *mgid = *(union ib_gid *) (mc_map + 4); 3748 } else { 3749 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3750 if (id_priv->id.ps == RDMA_PS_UDP) 3751 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3752 *mgid = *(union ib_gid *) (mc_map + 4); 3753 } 3754 } 3755 3756 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3757 struct cma_multicast *mc) 3758 { 3759 struct ib_sa_mcmember_rec rec; 3760 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3761 ib_sa_comp_mask comp_mask; 3762 int ret; 3763 3764 ib_addr_get_mgid(dev_addr, &rec.mgid); 3765 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3766 &rec.mgid, &rec); 3767 if (ret) 3768 return ret; 3769 3770 ret = cma_set_qkey(id_priv, 0); 3771 if (ret) 3772 return ret; 3773 3774 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3775 rec.qkey = cpu_to_be32(id_priv->qkey); 3776 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3777 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3778 rec.join_state = 1; 3779 3780 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 3781 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 3782 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 3783 IB_SA_MCMEMBER_REC_FLOW_LABEL | 3784 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 3785 3786 if (id_priv->id.ps == RDMA_PS_IPOIB) 3787 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 3788 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 3789 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 3790 IB_SA_MCMEMBER_REC_MTU | 3791 IB_SA_MCMEMBER_REC_HOP_LIMIT; 3792 3793 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 3794 id_priv->id.port_num, &rec, 3795 comp_mask, GFP_KERNEL, 3796 cma_ib_mc_handler, mc); 3797 return PTR_ERR_OR_ZERO(mc->multicast.ib); 3798 } 3799 3800 static void iboe_mcast_work_handler(struct work_struct *work) 3801 { 3802 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); 3803 struct cma_multicast *mc = mw->mc; 3804 struct ib_sa_multicast *m = mc->multicast.ib; 3805 3806 mc->multicast.ib->context = mc; 3807 cma_ib_mc_handler(0, m); 3808 kref_put(&mc->mcref, release_mc); 3809 kfree(mw); 3810 } 3811 3812 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid) 3813 { 3814 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 3815 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 3816 3817 if (cma_any_addr(addr)) { 3818 memset(mgid, 0, sizeof *mgid); 3819 } else if (addr->sa_family == AF_INET6) { 3820 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3821 } else { 3822 mgid->raw[0] = 0xff; 3823 mgid->raw[1] = 0x0e; 3824 mgid->raw[2] = 0; 3825 mgid->raw[3] = 0; 3826 mgid->raw[4] = 0; 3827 mgid->raw[5] = 0; 3828 mgid->raw[6] = 0; 3829 mgid->raw[7] = 0; 3830 mgid->raw[8] = 0; 3831 mgid->raw[9] = 0; 3832 mgid->raw[10] = 0xff; 3833 mgid->raw[11] = 0xff; 3834 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 3835 } 3836 } 3837 3838 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 3839 struct cma_multicast *mc) 3840 { 3841 struct iboe_mcast_work *work; 3842 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3843 int err = 0; 3844 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 3845 struct net_device *ndev = NULL; 3846 enum ib_gid_type gid_type; 3847 3848 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 3849 return -EINVAL; 3850 3851 work = kzalloc(sizeof *work, GFP_KERNEL); 3852 if (!work) 3853 return -ENOMEM; 3854 3855 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); 3856 if (!mc->multicast.ib) { 3857 err = -ENOMEM; 3858 goto out1; 3859 } 3860 3861 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid); 3862 3863 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); 3864 if (id_priv->id.ps == RDMA_PS_UDP) 3865 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3866 3867 if (dev_addr->bound_dev_if) 3868 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3869 if (!ndev) { 3870 err = -ENODEV; 3871 goto out2; 3872 } 3873 mc->multicast.ib->rec.rate = iboe_get_rate(ndev); 3874 mc->multicast.ib->rec.hop_limit = 1; 3875 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); 3876 3877 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3878 rdma_start_port(id_priv->cma_dev->device)]; 3879 if (addr->sa_family == AF_INET) { 3880 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3881 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, 3882 true); 3883 if (!err) { 3884 mc->igmp_joined = true; 3885 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; 3886 } 3887 } else { 3888 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3889 err = -ENOTSUPP; 3890 } 3891 dev_put(ndev); 3892 if (err || !mc->multicast.ib->rec.mtu) { 3893 if (!err) 3894 err = -EINVAL; 3895 goto out2; 3896 } 3897 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 3898 &mc->multicast.ib->rec.port_gid); 3899 work->id = id_priv; 3900 work->mc = mc; 3901 INIT_WORK(&work->work, iboe_mcast_work_handler); 3902 kref_get(&mc->mcref); 3903 queue_work(cma_wq, &work->work); 3904 3905 return 0; 3906 3907 out2: 3908 kfree(mc->multicast.ib); 3909 out1: 3910 kfree(work); 3911 return err; 3912 } 3913 3914 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 3915 void *context) 3916 { 3917 struct rdma_id_private *id_priv; 3918 struct cma_multicast *mc; 3919 int ret; 3920 3921 id_priv = container_of(id, struct rdma_id_private, id); 3922 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 3923 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 3924 return -EINVAL; 3925 3926 mc = kmalloc(sizeof *mc, GFP_KERNEL); 3927 if (!mc) 3928 return -ENOMEM; 3929 3930 memcpy(&mc->addr, addr, rdma_addr_size(addr)); 3931 mc->context = context; 3932 mc->id_priv = id_priv; 3933 mc->igmp_joined = false; 3934 spin_lock(&id_priv->lock); 3935 list_add(&mc->list, &id_priv->mc_list); 3936 spin_unlock(&id_priv->lock); 3937 3938 if (rdma_protocol_roce(id->device, id->port_num)) { 3939 kref_init(&mc->mcref); 3940 ret = cma_iboe_join_multicast(id_priv, mc); 3941 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) 3942 ret = cma_join_ib_multicast(id_priv, mc); 3943 else 3944 ret = -ENOSYS; 3945 3946 if (ret) { 3947 spin_lock_irq(&id_priv->lock); 3948 list_del(&mc->list); 3949 spin_unlock_irq(&id_priv->lock); 3950 kfree(mc); 3951 } 3952 return ret; 3953 } 3954 EXPORT_SYMBOL(rdma_join_multicast); 3955 3956 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 3957 { 3958 struct rdma_id_private *id_priv; 3959 struct cma_multicast *mc; 3960 3961 id_priv = container_of(id, struct rdma_id_private, id); 3962 spin_lock_irq(&id_priv->lock); 3963 list_for_each_entry(mc, &id_priv->mc_list, list) { 3964 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { 3965 list_del(&mc->list); 3966 spin_unlock_irq(&id_priv->lock); 3967 3968 if (id->qp) 3969 ib_detach_mcast(id->qp, 3970 &mc->multicast.ib->rec.mgid, 3971 be16_to_cpu(mc->multicast.ib->rec.mlid)); 3972 3973 BUG_ON(id_priv->cma_dev->device != id->device); 3974 3975 if (rdma_cap_ib_mcast(id->device, id->port_num)) { 3976 ib_sa_free_multicast(mc->multicast.ib); 3977 kfree(mc); 3978 } else if (rdma_protocol_roce(id->device, id->port_num)) { 3979 if (mc->igmp_joined) { 3980 struct rdma_dev_addr *dev_addr = 3981 &id->route.addr.dev_addr; 3982 struct net_device *ndev = NULL; 3983 3984 if (dev_addr->bound_dev_if) 3985 ndev = dev_get_by_index(&init_net, 3986 dev_addr->bound_dev_if); 3987 if (ndev) { 3988 cma_igmp_send(ndev, 3989 &mc->multicast.ib->rec.mgid, 3990 false); 3991 dev_put(ndev); 3992 } 3993 mc->igmp_joined = false; 3994 } 3995 kref_put(&mc->mcref, release_mc); 3996 } 3997 return; 3998 } 3999 } 4000 spin_unlock_irq(&id_priv->lock); 4001 } 4002 EXPORT_SYMBOL(rdma_leave_multicast); 4003 4004 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 4005 { 4006 struct rdma_dev_addr *dev_addr; 4007 struct cma_ndev_work *work; 4008 4009 dev_addr = &id_priv->id.route.addr.dev_addr; 4010 4011 if ((dev_addr->bound_dev_if == ndev->ifindex) && 4012 (net_eq(dev_net(ndev), dev_addr->net)) && 4013 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 4014 pr_info("RDMA CM addr change for ndev %s used by id %p\n", 4015 ndev->name, &id_priv->id); 4016 work = kzalloc(sizeof *work, GFP_KERNEL); 4017 if (!work) 4018 return -ENOMEM; 4019 4020 INIT_WORK(&work->work, cma_ndev_work_handler); 4021 work->id = id_priv; 4022 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 4023 atomic_inc(&id_priv->refcount); 4024 queue_work(cma_wq, &work->work); 4025 } 4026 4027 return 0; 4028 } 4029 4030 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 4031 void *ptr) 4032 { 4033 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 4034 struct cma_device *cma_dev; 4035 struct rdma_id_private *id_priv; 4036 int ret = NOTIFY_DONE; 4037 4038 if (event != NETDEV_BONDING_FAILOVER) 4039 return NOTIFY_DONE; 4040 4041 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) 4042 return NOTIFY_DONE; 4043 4044 mutex_lock(&lock); 4045 list_for_each_entry(cma_dev, &dev_list, list) 4046 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 4047 ret = cma_netdev_change(ndev, id_priv); 4048 if (ret) 4049 goto out; 4050 } 4051 4052 out: 4053 mutex_unlock(&lock); 4054 return ret; 4055 } 4056 4057 static struct notifier_block cma_nb = { 4058 .notifier_call = cma_netdev_callback 4059 }; 4060 4061 static void cma_add_one(struct ib_device *device) 4062 { 4063 struct cma_device *cma_dev; 4064 struct rdma_id_private *id_priv; 4065 unsigned int i; 4066 unsigned long supported_gids = 0; 4067 4068 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 4069 if (!cma_dev) 4070 return; 4071 4072 cma_dev->device = device; 4073 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 4074 sizeof(*cma_dev->default_gid_type), 4075 GFP_KERNEL); 4076 if (!cma_dev->default_gid_type) { 4077 kfree(cma_dev); 4078 return; 4079 } 4080 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 4081 supported_gids = roce_gid_type_mask_support(device, i); 4082 WARN_ON(!supported_gids); 4083 cma_dev->default_gid_type[i - rdma_start_port(device)] = 4084 find_first_bit(&supported_gids, BITS_PER_LONG); 4085 } 4086 4087 init_completion(&cma_dev->comp); 4088 atomic_set(&cma_dev->refcount, 1); 4089 INIT_LIST_HEAD(&cma_dev->id_list); 4090 ib_set_client_data(device, &cma_client, cma_dev); 4091 4092 mutex_lock(&lock); 4093 list_add_tail(&cma_dev->list, &dev_list); 4094 list_for_each_entry(id_priv, &listen_any_list, list) 4095 cma_listen_on_dev(id_priv, cma_dev); 4096 mutex_unlock(&lock); 4097 } 4098 4099 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 4100 { 4101 struct rdma_cm_event event; 4102 enum rdma_cm_state state; 4103 int ret = 0; 4104 4105 /* Record that we want to remove the device */ 4106 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); 4107 if (state == RDMA_CM_DESTROYING) 4108 return 0; 4109 4110 cma_cancel_operation(id_priv, state); 4111 mutex_lock(&id_priv->handler_mutex); 4112 4113 /* Check for destruction from another callback. */ 4114 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) 4115 goto out; 4116 4117 memset(&event, 0, sizeof event); 4118 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 4119 ret = id_priv->id.event_handler(&id_priv->id, &event); 4120 out: 4121 mutex_unlock(&id_priv->handler_mutex); 4122 return ret; 4123 } 4124 4125 static void cma_process_remove(struct cma_device *cma_dev) 4126 { 4127 struct rdma_id_private *id_priv; 4128 int ret; 4129 4130 mutex_lock(&lock); 4131 while (!list_empty(&cma_dev->id_list)) { 4132 id_priv = list_entry(cma_dev->id_list.next, 4133 struct rdma_id_private, list); 4134 4135 list_del(&id_priv->listen_list); 4136 list_del_init(&id_priv->list); 4137 atomic_inc(&id_priv->refcount); 4138 mutex_unlock(&lock); 4139 4140 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 4141 cma_deref_id(id_priv); 4142 if (ret) 4143 rdma_destroy_id(&id_priv->id); 4144 4145 mutex_lock(&lock); 4146 } 4147 mutex_unlock(&lock); 4148 4149 cma_deref_dev(cma_dev); 4150 wait_for_completion(&cma_dev->comp); 4151 } 4152 4153 static void cma_remove_one(struct ib_device *device, void *client_data) 4154 { 4155 struct cma_device *cma_dev = client_data; 4156 4157 if (!cma_dev) 4158 return; 4159 4160 mutex_lock(&lock); 4161 list_del(&cma_dev->list); 4162 mutex_unlock(&lock); 4163 4164 cma_process_remove(cma_dev); 4165 kfree(cma_dev->default_gid_type); 4166 kfree(cma_dev); 4167 } 4168 4169 static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) 4170 { 4171 struct nlmsghdr *nlh; 4172 struct rdma_cm_id_stats *id_stats; 4173 struct rdma_id_private *id_priv; 4174 struct rdma_cm_id *id = NULL; 4175 struct cma_device *cma_dev; 4176 int i_dev = 0, i_id = 0; 4177 4178 /* 4179 * We export all of the IDs as a sequence of messages. Each 4180 * ID gets its own netlink message. 4181 */ 4182 mutex_lock(&lock); 4183 4184 list_for_each_entry(cma_dev, &dev_list, list) { 4185 if (i_dev < cb->args[0]) { 4186 i_dev++; 4187 continue; 4188 } 4189 4190 i_id = 0; 4191 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 4192 if (i_id < cb->args[1]) { 4193 i_id++; 4194 continue; 4195 } 4196 4197 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, 4198 sizeof *id_stats, RDMA_NL_RDMA_CM, 4199 RDMA_NL_RDMA_CM_ID_STATS, 4200 NLM_F_MULTI); 4201 if (!id_stats) 4202 goto out; 4203 4204 memset(id_stats, 0, sizeof *id_stats); 4205 id = &id_priv->id; 4206 id_stats->node_type = id->route.addr.dev_addr.dev_type; 4207 id_stats->port_num = id->port_num; 4208 id_stats->bound_dev_if = 4209 id->route.addr.dev_addr.bound_dev_if; 4210 4211 if (ibnl_put_attr(skb, nlh, 4212 rdma_addr_size(cma_src_addr(id_priv)), 4213 cma_src_addr(id_priv), 4214 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) 4215 goto out; 4216 if (ibnl_put_attr(skb, nlh, 4217 rdma_addr_size(cma_src_addr(id_priv)), 4218 cma_dst_addr(id_priv), 4219 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) 4220 goto out; 4221 4222 id_stats->pid = id_priv->owner; 4223 id_stats->port_space = id->ps; 4224 id_stats->cm_state = id_priv->state; 4225 id_stats->qp_num = id_priv->qp_num; 4226 id_stats->qp_type = id->qp_type; 4227 4228 i_id++; 4229 } 4230 4231 cb->args[1] = 0; 4232 i_dev++; 4233 } 4234 4235 out: 4236 mutex_unlock(&lock); 4237 cb->args[0] = i_dev; 4238 cb->args[1] = i_id; 4239 4240 return skb->len; 4241 } 4242 4243 static const struct ibnl_client_cbs cma_cb_table[] = { 4244 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, 4245 .module = THIS_MODULE }, 4246 }; 4247 4248 static int cma_init_net(struct net *net) 4249 { 4250 struct cma_pernet *pernet = cma_pernet(net); 4251 4252 idr_init(&pernet->tcp_ps); 4253 idr_init(&pernet->udp_ps); 4254 idr_init(&pernet->ipoib_ps); 4255 idr_init(&pernet->ib_ps); 4256 4257 return 0; 4258 } 4259 4260 static void cma_exit_net(struct net *net) 4261 { 4262 struct cma_pernet *pernet = cma_pernet(net); 4263 4264 idr_destroy(&pernet->tcp_ps); 4265 idr_destroy(&pernet->udp_ps); 4266 idr_destroy(&pernet->ipoib_ps); 4267 idr_destroy(&pernet->ib_ps); 4268 } 4269 4270 static struct pernet_operations cma_pernet_operations = { 4271 .init = cma_init_net, 4272 .exit = cma_exit_net, 4273 .id = &cma_pernet_id, 4274 .size = sizeof(struct cma_pernet), 4275 }; 4276 4277 static int __init cma_init(void) 4278 { 4279 int ret; 4280 4281 cma_wq = create_singlethread_workqueue("rdma_cm"); 4282 if (!cma_wq) 4283 return -ENOMEM; 4284 4285 ret = register_pernet_subsys(&cma_pernet_operations); 4286 if (ret) 4287 goto err_wq; 4288 4289 ib_sa_register_client(&sa_client); 4290 rdma_addr_register_client(&addr_client); 4291 register_netdevice_notifier(&cma_nb); 4292 4293 ret = ib_register_client(&cma_client); 4294 if (ret) 4295 goto err; 4296 4297 if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table)) 4298 pr_warn("RDMA CMA: failed to add netlink callback\n"); 4299 cma_configfs_init(); 4300 4301 return 0; 4302 4303 err: 4304 unregister_netdevice_notifier(&cma_nb); 4305 rdma_addr_unregister_client(&addr_client); 4306 ib_sa_unregister_client(&sa_client); 4307 err_wq: 4308 destroy_workqueue(cma_wq); 4309 return ret; 4310 } 4311 4312 static void __exit cma_cleanup(void) 4313 { 4314 cma_configfs_exit(); 4315 ibnl_remove_client(RDMA_NL_RDMA_CM); 4316 ib_unregister_client(&cma_client); 4317 unregister_netdevice_notifier(&cma_nb); 4318 rdma_addr_unregister_client(&addr_client); 4319 ib_sa_unregister_client(&sa_client); 4320 unregister_pernet_subsys(&cma_pernet_operations); 4321 destroy_workqueue(cma_wq); 4322 } 4323 4324 module_init(cma_init); 4325 module_exit(cma_cleanup); 4326