1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* 3 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved. 6 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 7 */ 8 9 #include <linux/completion.h> 10 #include <linux/in.h> 11 #include <linux/in6.h> 12 #include <linux/mutex.h> 13 #include <linux/random.h> 14 #include <linux/rbtree.h> 15 #include <linux/igmp.h> 16 #include <linux/xarray.h> 17 #include <linux/inetdevice.h> 18 #include <linux/slab.h> 19 #include <linux/module.h> 20 #include <net/route.h> 21 22 #include <net/net_namespace.h> 23 #include <net/netns/generic.h> 24 #include <net/netevent.h> 25 #include <net/tcp.h> 26 #include <net/ipv6.h> 27 #include <net/ip_fib.h> 28 #include <net/ip6_route.h> 29 30 #include <rdma/rdma_cm.h> 31 #include <rdma/rdma_cm_ib.h> 32 #include <rdma/rdma_netlink.h> 33 #include <rdma/ib.h> 34 #include <rdma/ib_cache.h> 35 #include <rdma/ib_cm.h> 36 #include <rdma/ib_sa.h> 37 #include <rdma/iw_cm.h> 38 39 #include "core_priv.h" 40 #include "cma_priv.h" 41 #include "cma_trace.h" 42 43 MODULE_AUTHOR("Sean Hefty"); 44 MODULE_DESCRIPTION("Generic RDMA CM Agent"); 45 MODULE_LICENSE("Dual BSD/GPL"); 46 47 #define CMA_CM_RESPONSE_TIMEOUT 20 48 #define CMA_MAX_CM_RETRIES 15 49 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 50 #define CMA_IBOE_PACKET_LIFETIME 16 51 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP 52 53 static const char * const cma_events[] = { 54 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", 55 [RDMA_CM_EVENT_ADDR_ERROR] = "address error", 56 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", 57 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", 58 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", 59 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", 60 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", 61 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", 62 [RDMA_CM_EVENT_REJECTED] = "rejected", 63 [RDMA_CM_EVENT_ESTABLISHED] = "established", 64 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", 65 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", 66 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", 67 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", 68 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", 69 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 70 }; 71 72 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, 73 enum ib_gid_type gid_type); 74 75 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 76 { 77 size_t index = event; 78 79 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 80 cma_events[index] : "unrecognized event"; 81 } 82 EXPORT_SYMBOL(rdma_event_msg); 83 84 const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 85 int reason) 86 { 87 if (rdma_ib_or_roce(id->device, id->port_num)) 88 return ibcm_reject_msg(reason); 89 90 if (rdma_protocol_iwarp(id->device, id->port_num)) 91 return iwcm_reject_msg(reason); 92 93 WARN_ON_ONCE(1); 94 return "unrecognized transport"; 95 } 96 EXPORT_SYMBOL(rdma_reject_msg); 97 98 /** 99 * rdma_is_consumer_reject - return true if the consumer rejected the connect 100 * request. 101 * @id: Communication identifier that received the REJECT event. 102 * @reason: Value returned in the REJECT event status field. 
103 */ 104 static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) 105 { 106 if (rdma_ib_or_roce(id->device, id->port_num)) 107 return reason == IB_CM_REJ_CONSUMER_DEFINED; 108 109 if (rdma_protocol_iwarp(id->device, id->port_num)) 110 return reason == -ECONNREFUSED; 111 112 WARN_ON_ONCE(1); 113 return false; 114 } 115 116 const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 117 struct rdma_cm_event *ev, u8 *data_len) 118 { 119 const void *p; 120 121 if (rdma_is_consumer_reject(id, ev->status)) { 122 *data_len = ev->param.conn.private_data_len; 123 p = ev->param.conn.private_data; 124 } else { 125 *data_len = 0; 126 p = NULL; 127 } 128 return p; 129 } 130 EXPORT_SYMBOL(rdma_consumer_reject_data); 131 132 /** 133 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. 134 * @id: Communication Identifier 135 */ 136 struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) 137 { 138 struct rdma_id_private *id_priv; 139 140 id_priv = container_of(id, struct rdma_id_private, id); 141 if (id->device->node_type == RDMA_NODE_RNIC) 142 return id_priv->cm_id.iw; 143 return NULL; 144 } 145 EXPORT_SYMBOL(rdma_iw_cm_id); 146 147 /** 148 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. 149 * @res: rdma resource tracking entry pointer 150 */ 151 struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) 152 { 153 struct rdma_id_private *id_priv = 154 container_of(res, struct rdma_id_private, res); 155 156 return &id_priv->id; 157 } 158 EXPORT_SYMBOL(rdma_res_to_id); 159 160 static int cma_add_one(struct ib_device *device); 161 static void cma_remove_one(struct ib_device *device, void *client_data); 162 163 static struct ib_client cma_client = { 164 .name = "cma", 165 .add = cma_add_one, 166 .remove = cma_remove_one 167 }; 168 169 static struct ib_sa_client sa_client; 170 static LIST_HEAD(dev_list); 171 static LIST_HEAD(listen_any_list); 172 static DEFINE_MUTEX(lock); 173 static struct rb_root id_table = RB_ROOT; 174 /* Serialize operations of id_table tree */ 175 static DEFINE_SPINLOCK(id_table_lock); 176 static struct workqueue_struct *cma_wq; 177 static unsigned int cma_pernet_id; 178 179 struct cma_pernet { 180 struct xarray tcp_ps; 181 struct xarray udp_ps; 182 struct xarray ipoib_ps; 183 struct xarray ib_ps; 184 }; 185 186 static struct cma_pernet *cma_pernet(struct net *net) 187 { 188 return net_generic(net, cma_pernet_id); 189 } 190 191 static 192 struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) 193 { 194 struct cma_pernet *pernet = cma_pernet(net); 195 196 switch (ps) { 197 case RDMA_PS_TCP: 198 return &pernet->tcp_ps; 199 case RDMA_PS_UDP: 200 return &pernet->udp_ps; 201 case RDMA_PS_IPOIB: 202 return &pernet->ipoib_ps; 203 case RDMA_PS_IB: 204 return &pernet->ib_ps; 205 default: 206 return NULL; 207 } 208 } 209 210 struct id_table_entry { 211 struct list_head id_list; 212 struct rb_node rb_node; 213 }; 214 215 struct cma_device { 216 struct list_head list; 217 struct ib_device *device; 218 struct completion comp; 219 refcount_t refcount; 220 struct list_head id_list; 221 enum ib_gid_type *default_gid_type; 222 u8 *default_roce_tos; 223 }; 224 225 struct rdma_bind_list { 226 enum rdma_ucm_port_space ps; 227 struct hlist_head owners; 228 unsigned short port; 229 }; 230 231 static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, 232 struct rdma_bind_list *bind_list, int snum) 233 { 234 struct xarray *xa = cma_pernet_xa(net, ps); 235 236 return xa_insert(xa, snum, bind_list, GFP_KERNEL); 237 } 238 239 
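/*
 * Illustrative sketch (editorial addition, not part of the upstream file):
 * how the per-net port-space xarrays above are used. cma_ps_alloc() maps a
 * port number ("snum") to its rdma_bind_list with xa_insert(), and the
 * cma_ps_find()/cma_ps_remove() helpers that follow are thin xa_load()/
 * xa_erase() wrappers over the same table. The function name, the
 * CMA_DOC_EXAMPLE guard and the example port are hypothetical and never
 * compiled; they only demonstrate the lookup pattern.
 */
#ifdef CMA_DOC_EXAMPLE
static int cma_ps_example(struct net *net)
{
	struct xarray *xa = cma_pernet_xa(net, RDMA_PS_TCP);
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof(*bind_list), GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	bind_list->ps = RDMA_PS_TCP;
	bind_list->port = 4791;	/* arbitrary example port */

	/* cma_ps_alloc(net, RDMA_PS_TCP, bind_list, 4791) does this xa_insert() */
	ret = xa_insert(xa, bind_list->port, bind_list, GFP_KERNEL);
	if (ret) {
		kfree(bind_list);
		return ret;	/* -EBUSY if the port is already claimed */
	}

	/* cma_ps_find() is xa_load(); cma_ps_remove() is xa_erase() */
	WARN_ON(xa_load(xa, bind_list->port) != bind_list);
	xa_erase(xa, bind_list->port);
	kfree(bind_list);
	return 0;
}
#endif /* CMA_DOC_EXAMPLE */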
static struct rdma_bind_list *cma_ps_find(struct net *net, 240 enum rdma_ucm_port_space ps, int snum) 241 { 242 struct xarray *xa = cma_pernet_xa(net, ps); 243 244 return xa_load(xa, snum); 245 } 246 247 static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, 248 int snum) 249 { 250 struct xarray *xa = cma_pernet_xa(net, ps); 251 252 xa_erase(xa, snum); 253 } 254 255 enum { 256 CMA_OPTION_AFONLY, 257 }; 258 259 void cma_dev_get(struct cma_device *cma_dev) 260 { 261 refcount_inc(&cma_dev->refcount); 262 } 263 264 void cma_dev_put(struct cma_device *cma_dev) 265 { 266 if (refcount_dec_and_test(&cma_dev->refcount)) 267 complete(&cma_dev->comp); 268 } 269 270 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 271 void *cookie) 272 { 273 struct cma_device *cma_dev; 274 struct cma_device *found_cma_dev = NULL; 275 276 mutex_lock(&lock); 277 278 list_for_each_entry(cma_dev, &dev_list, list) 279 if (filter(cma_dev->device, cookie)) { 280 found_cma_dev = cma_dev; 281 break; 282 } 283 284 if (found_cma_dev) 285 cma_dev_get(found_cma_dev); 286 mutex_unlock(&lock); 287 return found_cma_dev; 288 } 289 290 int cma_get_default_gid_type(struct cma_device *cma_dev, 291 u32 port) 292 { 293 if (!rdma_is_port_valid(cma_dev->device, port)) 294 return -EINVAL; 295 296 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; 297 } 298 299 int cma_set_default_gid_type(struct cma_device *cma_dev, 300 u32 port, 301 enum ib_gid_type default_gid_type) 302 { 303 unsigned long supported_gids; 304 305 if (!rdma_is_port_valid(cma_dev->device, port)) 306 return -EINVAL; 307 308 if (default_gid_type == IB_GID_TYPE_IB && 309 rdma_protocol_roce_eth_encap(cma_dev->device, port)) 310 default_gid_type = IB_GID_TYPE_ROCE; 311 312 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); 313 314 if (!(supported_gids & 1 << default_gid_type)) 315 return -EINVAL; 316 317 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = 318 default_gid_type; 319 320 return 0; 321 } 322 323 int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) 324 { 325 if (!rdma_is_port_valid(cma_dev->device, port)) 326 return -EINVAL; 327 328 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; 329 } 330 331 int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, 332 u8 default_roce_tos) 333 { 334 if (!rdma_is_port_valid(cma_dev->device, port)) 335 return -EINVAL; 336 337 cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = 338 default_roce_tos; 339 340 return 0; 341 } 342 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) 343 { 344 return cma_dev->device; 345 } 346 347 /* 348 * Device removal can occur at anytime, so we need extra handling to 349 * serialize notifying the user of device removal with other callbacks. 350 * We do this by disabling removal notification while a callback is in process, 351 * and reporting it after the callback completes. 
352 */ 353 354 struct cma_multicast { 355 struct rdma_id_private *id_priv; 356 union { 357 struct ib_sa_multicast *sa_mc; 358 struct { 359 struct work_struct work; 360 struct rdma_cm_event event; 361 } iboe_join; 362 }; 363 struct list_head list; 364 void *context; 365 struct sockaddr_storage addr; 366 u8 join_state; 367 }; 368 369 struct cma_work { 370 struct work_struct work; 371 struct rdma_id_private *id; 372 enum rdma_cm_state old_state; 373 enum rdma_cm_state new_state; 374 struct rdma_cm_event event; 375 }; 376 377 union cma_ip_addr { 378 struct in6_addr ip6; 379 struct { 380 __be32 pad[3]; 381 __be32 addr; 382 } ip4; 383 }; 384 385 struct cma_hdr { 386 u8 cma_version; 387 u8 ip_version; /* IP version: 7:4 */ 388 __be16 port; 389 union cma_ip_addr src_addr; 390 union cma_ip_addr dst_addr; 391 }; 392 393 #define CMA_VERSION 0x00 394 395 struct cma_req_info { 396 struct sockaddr_storage listen_addr_storage; 397 struct sockaddr_storage src_addr_storage; 398 struct ib_device *device; 399 union ib_gid local_gid; 400 __be64 service_id; 401 int port; 402 bool has_gid; 403 u16 pkey; 404 }; 405 406 static int cma_comp_exch(struct rdma_id_private *id_priv, 407 enum rdma_cm_state comp, enum rdma_cm_state exch) 408 { 409 unsigned long flags; 410 int ret; 411 412 /* 413 * The FSM uses a funny double locking where state is protected by both 414 * the handler_mutex and the spinlock. State is not allowed to change 415 * to/from a handler_mutex protected value without also holding 416 * handler_mutex. 417 */ 418 if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) 419 lockdep_assert_held(&id_priv->handler_mutex); 420 421 spin_lock_irqsave(&id_priv->lock, flags); 422 if ((ret = (id_priv->state == comp))) 423 id_priv->state = exch; 424 spin_unlock_irqrestore(&id_priv->lock, flags); 425 return ret; 426 } 427 428 static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) 429 { 430 return hdr->ip_version >> 4; 431 } 432 433 static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 434 { 435 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 436 } 437 438 static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) 439 { 440 return (struct sockaddr *)&id_priv->id.route.addr.src_addr; 441 } 442 443 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) 444 { 445 return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; 446 } 447 448 static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) 449 { 450 struct in_device *in_dev = NULL; 451 452 if (ndev) { 453 rtnl_lock(); 454 in_dev = __in_dev_get_rtnl(ndev); 455 if (in_dev) { 456 if (join) 457 ip_mc_inc_group(in_dev, 458 *(__be32 *)(mgid->raw + 12)); 459 else 460 ip_mc_dec_group(in_dev, 461 *(__be32 *)(mgid->raw + 12)); 462 } 463 rtnl_unlock(); 464 } 465 return (in_dev) ? 0 : -ENODEV; 466 } 467 468 static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, 469 struct id_table_entry *entry_b) 470 { 471 struct rdma_id_private *id_priv = list_first_entry( 472 &entry_b->id_list, struct rdma_id_private, id_list_entry); 473 int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; 474 struct sockaddr *sb = cma_dst_addr(id_priv); 475 476 if (ifindex_a != ifindex_b) 477 return (ifindex_a > ifindex_b) ? 
1 : -1; 478 479 if (sa->sa_family != sb->sa_family) 480 return sa->sa_family - sb->sa_family; 481 482 if (sa->sa_family == AF_INET && 483 __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) { 484 return memcmp(&((struct sockaddr_in *)sa)->sin_addr, 485 &((struct sockaddr_in *)sb)->sin_addr, 486 sizeof(((struct sockaddr_in *)sa)->sin_addr)); 487 } 488 489 if (sa->sa_family == AF_INET6 && 490 __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) { 491 return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, 492 &((struct sockaddr_in6 *)sb)->sin6_addr); 493 } 494 495 return -1; 496 } 497 498 static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) 499 { 500 struct rb_node **new, *parent = NULL; 501 struct id_table_entry *this, *node; 502 unsigned long flags; 503 int result; 504 505 node = kzalloc(sizeof(*node), GFP_KERNEL); 506 if (!node) 507 return -ENOMEM; 508 509 spin_lock_irqsave(&id_table_lock, flags); 510 new = &id_table.rb_node; 511 while (*new) { 512 this = container_of(*new, struct id_table_entry, rb_node); 513 result = compare_netdev_and_ip( 514 node_id_priv->id.route.addr.dev_addr.bound_dev_if, 515 cma_dst_addr(node_id_priv), this); 516 517 parent = *new; 518 if (result < 0) 519 new = &((*new)->rb_left); 520 else if (result > 0) 521 new = &((*new)->rb_right); 522 else { 523 list_add_tail(&node_id_priv->id_list_entry, 524 &this->id_list); 525 kfree(node); 526 goto unlock; 527 } 528 } 529 530 INIT_LIST_HEAD(&node->id_list); 531 list_add_tail(&node_id_priv->id_list_entry, &node->id_list); 532 533 rb_link_node(&node->rb_node, parent, new); 534 rb_insert_color(&node->rb_node, &id_table); 535 536 unlock: 537 spin_unlock_irqrestore(&id_table_lock, flags); 538 return 0; 539 } 540 541 static struct id_table_entry * 542 node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa) 543 { 544 struct rb_node *node = root->rb_node; 545 struct id_table_entry *data; 546 int result; 547 548 while (node) { 549 data = container_of(node, struct id_table_entry, rb_node); 550 result = compare_netdev_and_ip(ifindex, sa, data); 551 if (result < 0) 552 node = node->rb_left; 553 else if (result > 0) 554 node = node->rb_right; 555 else 556 return data; 557 } 558 559 return NULL; 560 } 561 562 static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) 563 { 564 struct id_table_entry *data; 565 unsigned long flags; 566 567 spin_lock_irqsave(&id_table_lock, flags); 568 if (list_empty(&id_priv->id_list_entry)) 569 goto out; 570 571 data = node_from_ndev_ip(&id_table, 572 id_priv->id.route.addr.dev_addr.bound_dev_if, 573 cma_dst_addr(id_priv)); 574 if (!data) 575 goto out; 576 577 list_del_init(&id_priv->id_list_entry); 578 if (list_empty(&data->id_list)) { 579 rb_erase(&data->rb_node, &id_table); 580 kfree(data); 581 } 582 out: 583 spin_unlock_irqrestore(&id_table_lock, flags); 584 } 585 586 static void _cma_attach_to_dev(struct rdma_id_private *id_priv, 587 struct cma_device *cma_dev) 588 { 589 cma_dev_get(cma_dev); 590 id_priv->cma_dev = cma_dev; 591 id_priv->id.device = cma_dev->device; 592 id_priv->id.route.addr.dev_addr.transport = 593 rdma_node_get_transport(cma_dev->device->node_type); 594 list_add_tail(&id_priv->device_item, &cma_dev->id_list); 595 596 trace_cm_id_attach(id_priv, cma_dev->device); 597 } 598 599 static void cma_attach_to_dev(struct rdma_id_private *id_priv, 600 struct cma_device *cma_dev) 601 { 602 _cma_attach_to_dev(id_priv, cma_dev); 603 id_priv->gid_type = 604 cma_dev->default_gid_type[id_priv->id.port_num - 605 
rdma_start_port(cma_dev->device)]; 606 } 607 608 static void cma_release_dev(struct rdma_id_private *id_priv) 609 { 610 mutex_lock(&lock); 611 list_del_init(&id_priv->device_item); 612 cma_dev_put(id_priv->cma_dev); 613 id_priv->cma_dev = NULL; 614 id_priv->id.device = NULL; 615 if (id_priv->id.route.addr.dev_addr.sgid_attr) { 616 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); 617 id_priv->id.route.addr.dev_addr.sgid_attr = NULL; 618 } 619 mutex_unlock(&lock); 620 } 621 622 static inline unsigned short cma_family(struct rdma_id_private *id_priv) 623 { 624 return id_priv->id.route.addr.src_addr.ss_family; 625 } 626 627 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) 628 { 629 struct ib_sa_mcmember_rec rec; 630 int ret = 0; 631 632 if (id_priv->qkey) { 633 if (qkey && id_priv->qkey != qkey) 634 return -EINVAL; 635 return 0; 636 } 637 638 if (qkey) { 639 id_priv->qkey = qkey; 640 return 0; 641 } 642 643 switch (id_priv->id.ps) { 644 case RDMA_PS_UDP: 645 case RDMA_PS_IB: 646 id_priv->qkey = RDMA_UDP_QKEY; 647 break; 648 case RDMA_PS_IPOIB: 649 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 650 ret = ib_sa_get_mcmember_rec(id_priv->id.device, 651 id_priv->id.port_num, &rec.mgid, 652 &rec); 653 if (!ret) 654 id_priv->qkey = be32_to_cpu(rec.qkey); 655 break; 656 default: 657 break; 658 } 659 return ret; 660 } 661 662 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) 663 { 664 dev_addr->dev_type = ARPHRD_INFINIBAND; 665 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); 666 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); 667 } 668 669 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 670 { 671 int ret; 672 673 if (addr->sa_family != AF_IB) { 674 ret = rdma_translate_ip(addr, dev_addr); 675 } else { 676 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); 677 ret = 0; 678 } 679 680 return ret; 681 } 682 683 static const struct ib_gid_attr * 684 cma_validate_port(struct ib_device *device, u32 port, 685 enum ib_gid_type gid_type, 686 union ib_gid *gid, 687 struct rdma_id_private *id_priv) 688 { 689 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 690 int bound_if_index = dev_addr->bound_dev_if; 691 const struct ib_gid_attr *sgid_attr; 692 int dev_type = dev_addr->dev_type; 693 struct net_device *ndev = NULL; 694 695 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) 696 return ERR_PTR(-ENODEV); 697 698 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) 699 return ERR_PTR(-ENODEV); 700 701 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 702 return ERR_PTR(-ENODEV); 703 704 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 705 ndev = dev_get_by_index(dev_addr->net, bound_if_index); 706 if (!ndev) 707 return ERR_PTR(-ENODEV); 708 } else { 709 gid_type = IB_GID_TYPE_IB; 710 } 711 712 sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); 713 dev_put(ndev); 714 return sgid_attr; 715 } 716 717 static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, 718 const struct ib_gid_attr *sgid_attr) 719 { 720 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); 721 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; 722 } 723 724 /** 725 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute 726 * based on source ip address. 
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds the cm_id to a cma device, port and GID
 * attribute based on the source IP address. It returns 0 on success or an
 * error code otherwise. It is applicable to both active and passive side
 * cm_ids.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or an error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
817 */ 818 mutex_lock(&lock); 819 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); 820 mutex_unlock(&lock); 821 rdma_restrack_add(&id_priv->res); 822 return 0; 823 } 824 825 static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, 826 const struct rdma_id_private *listen_id_priv) 827 { 828 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 829 const struct ib_gid_attr *sgid_attr; 830 struct cma_device *cma_dev; 831 enum ib_gid_type gid_type; 832 int ret = -ENODEV; 833 union ib_gid gid; 834 u32 port; 835 836 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 837 id_priv->id.ps == RDMA_PS_IPOIB) 838 return -EINVAL; 839 840 memcpy(&gid, dev_addr->src_dev_addr + 841 rdma_addr_gid_offset(dev_addr), sizeof(gid)); 842 843 mutex_lock(&lock); 844 845 cma_dev = listen_id_priv->cma_dev; 846 port = listen_id_priv->id.port_num; 847 gid_type = listen_id_priv->gid_type; 848 sgid_attr = cma_validate_port(cma_dev->device, port, 849 gid_type, &gid, id_priv); 850 if (!IS_ERR(sgid_attr)) { 851 id_priv->id.port_num = port; 852 cma_bind_sgid_attr(id_priv, sgid_attr); 853 ret = 0; 854 goto out; 855 } 856 857 list_for_each_entry(cma_dev, &dev_list, list) { 858 rdma_for_each_port (cma_dev->device, port) { 859 if (listen_id_priv->cma_dev == cma_dev && 860 listen_id_priv->id.port_num == port) 861 continue; 862 863 gid_type = cma_dev->default_gid_type[port - 1]; 864 sgid_attr = cma_validate_port(cma_dev->device, port, 865 gid_type, &gid, id_priv); 866 if (!IS_ERR(sgid_attr)) { 867 id_priv->id.port_num = port; 868 cma_bind_sgid_attr(id_priv, sgid_attr); 869 ret = 0; 870 goto out; 871 } 872 } 873 } 874 875 out: 876 if (!ret) { 877 cma_attach_to_dev(id_priv, cma_dev); 878 rdma_restrack_add(&id_priv->res); 879 } 880 881 mutex_unlock(&lock); 882 return ret; 883 } 884 885 /* 886 * Select the source IB device and address to reach the destination IB address. 
887 */ 888 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) 889 { 890 struct cma_device *cma_dev, *cur_dev; 891 struct sockaddr_ib *addr; 892 union ib_gid gid, sgid, *dgid; 893 unsigned int p; 894 u16 pkey, index; 895 enum ib_port_state port_state; 896 int ret; 897 int i; 898 899 cma_dev = NULL; 900 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); 901 dgid = (union ib_gid *) &addr->sib_addr; 902 pkey = ntohs(addr->sib_pkey); 903 904 mutex_lock(&lock); 905 list_for_each_entry(cur_dev, &dev_list, list) { 906 rdma_for_each_port (cur_dev->device, p) { 907 if (!rdma_cap_af_ib(cur_dev->device, p)) 908 continue; 909 910 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) 911 continue; 912 913 if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) 914 continue; 915 916 for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; 917 ++i) { 918 ret = rdma_query_gid(cur_dev->device, p, i, 919 &gid); 920 if (ret) 921 continue; 922 923 if (!memcmp(&gid, dgid, sizeof(gid))) { 924 cma_dev = cur_dev; 925 sgid = gid; 926 id_priv->id.port_num = p; 927 goto found; 928 } 929 930 if (!cma_dev && (gid.global.subnet_prefix == 931 dgid->global.subnet_prefix) && 932 port_state == IB_PORT_ACTIVE) { 933 cma_dev = cur_dev; 934 sgid = gid; 935 id_priv->id.port_num = p; 936 goto found; 937 } 938 } 939 } 940 } 941 mutex_unlock(&lock); 942 return -ENODEV; 943 944 found: 945 cma_attach_to_dev(id_priv, cma_dev); 946 rdma_restrack_add(&id_priv->res); 947 mutex_unlock(&lock); 948 addr = (struct sockaddr_ib *)cma_src_addr(id_priv); 949 memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); 950 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 951 return 0; 952 } 953 954 static void cma_id_get(struct rdma_id_private *id_priv) 955 { 956 refcount_inc(&id_priv->refcount); 957 } 958 959 static void cma_id_put(struct rdma_id_private *id_priv) 960 { 961 if (refcount_dec_and_test(&id_priv->refcount)) 962 complete(&id_priv->comp); 963 } 964 965 static struct rdma_id_private * 966 __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, 967 void *context, enum rdma_ucm_port_space ps, 968 enum ib_qp_type qp_type, const struct rdma_id_private *parent) 969 { 970 struct rdma_id_private *id_priv; 971 972 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 973 if (!id_priv) 974 return ERR_PTR(-ENOMEM); 975 976 id_priv->state = RDMA_CM_IDLE; 977 id_priv->id.context = context; 978 id_priv->id.event_handler = event_handler; 979 id_priv->id.ps = ps; 980 id_priv->id.qp_type = qp_type; 981 id_priv->tos_set = false; 982 id_priv->timeout_set = false; 983 id_priv->min_rnr_timer_set = false; 984 id_priv->gid_type = IB_GID_TYPE_IB; 985 spin_lock_init(&id_priv->lock); 986 mutex_init(&id_priv->qp_mutex); 987 init_completion(&id_priv->comp); 988 refcount_set(&id_priv->refcount, 1); 989 mutex_init(&id_priv->handler_mutex); 990 INIT_LIST_HEAD(&id_priv->device_item); 991 INIT_LIST_HEAD(&id_priv->id_list_entry); 992 INIT_LIST_HEAD(&id_priv->listen_list); 993 INIT_LIST_HEAD(&id_priv->mc_list); 994 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 995 id_priv->id.route.addr.dev_addr.net = get_net(net); 996 id_priv->seq_num &= 0x00ffffff; 997 998 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); 999 if (parent) 1000 rdma_restrack_parent_name(&id_priv->res, &parent->res); 1001 1002 return id_priv; 1003 } 1004 1005 struct rdma_cm_id * 1006 __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, 1007 void *context, enum rdma_ucm_port_space ps, 1008 enum ib_qp_type qp_type, 
const char *caller) 1009 { 1010 struct rdma_id_private *ret; 1011 1012 ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); 1013 if (IS_ERR(ret)) 1014 return ERR_CAST(ret); 1015 1016 rdma_restrack_set_name(&ret->res, caller); 1017 return &ret->id; 1018 } 1019 EXPORT_SYMBOL(__rdma_create_kernel_id); 1020 1021 struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, 1022 void *context, 1023 enum rdma_ucm_port_space ps, 1024 enum ib_qp_type qp_type) 1025 { 1026 struct rdma_id_private *ret; 1027 1028 ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, 1029 ps, qp_type, NULL); 1030 if (IS_ERR(ret)) 1031 return ERR_CAST(ret); 1032 1033 rdma_restrack_set_name(&ret->res, NULL); 1034 return &ret->id; 1035 } 1036 EXPORT_SYMBOL(rdma_create_user_id); 1037 1038 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 1039 { 1040 struct ib_qp_attr qp_attr; 1041 int qp_attr_mask, ret; 1042 1043 qp_attr.qp_state = IB_QPS_INIT; 1044 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1045 if (ret) 1046 return ret; 1047 1048 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 1049 if (ret) 1050 return ret; 1051 1052 qp_attr.qp_state = IB_QPS_RTR; 1053 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 1054 if (ret) 1055 return ret; 1056 1057 qp_attr.qp_state = IB_QPS_RTS; 1058 qp_attr.sq_psn = 0; 1059 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 1060 1061 return ret; 1062 } 1063 1064 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 1065 { 1066 struct ib_qp_attr qp_attr; 1067 int qp_attr_mask, ret; 1068 1069 qp_attr.qp_state = IB_QPS_INIT; 1070 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1071 if (ret) 1072 return ret; 1073 1074 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 1075 } 1076 1077 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 1078 struct ib_qp_init_attr *qp_init_attr) 1079 { 1080 struct rdma_id_private *id_priv; 1081 struct ib_qp *qp; 1082 int ret; 1083 1084 id_priv = container_of(id, struct rdma_id_private, id); 1085 if (id->device != pd->device) { 1086 ret = -EINVAL; 1087 goto out_err; 1088 } 1089 1090 qp_init_attr->port_num = id->port_num; 1091 qp = ib_create_qp(pd, qp_init_attr); 1092 if (IS_ERR(qp)) { 1093 ret = PTR_ERR(qp); 1094 goto out_err; 1095 } 1096 1097 if (id->qp_type == IB_QPT_UD) 1098 ret = cma_init_ud_qp(id_priv, qp); 1099 else 1100 ret = cma_init_conn_qp(id_priv, qp); 1101 if (ret) 1102 goto out_destroy; 1103 1104 id->qp = qp; 1105 id_priv->qp_num = qp->qp_num; 1106 id_priv->srq = (qp->srq != NULL); 1107 trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); 1108 return 0; 1109 out_destroy: 1110 ib_destroy_qp(qp); 1111 out_err: 1112 trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); 1113 return ret; 1114 } 1115 EXPORT_SYMBOL(rdma_create_qp); 1116 1117 void rdma_destroy_qp(struct rdma_cm_id *id) 1118 { 1119 struct rdma_id_private *id_priv; 1120 1121 id_priv = container_of(id, struct rdma_id_private, id); 1122 trace_cm_qp_destroy(id_priv); 1123 mutex_lock(&id_priv->qp_mutex); 1124 ib_destroy_qp(id_priv->id.qp); 1125 id_priv->id.qp = NULL; 1126 mutex_unlock(&id_priv->qp_mutex); 1127 } 1128 EXPORT_SYMBOL(rdma_destroy_qp); 1129 1130 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 1131 struct rdma_conn_param *conn_param) 1132 { 1133 struct ib_qp_attr qp_attr; 1134 int qp_attr_mask, ret; 1135 1136 mutex_lock(&id_priv->qp_mutex); 1137 if (!id_priv->id.qp) { 1138 ret = 0; 1139 goto out; 1140 } 1141 1142 /* Need to 
update QP attributes from default values. */ 1143 qp_attr.qp_state = IB_QPS_INIT; 1144 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1145 if (ret) 1146 goto out; 1147 1148 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1149 if (ret) 1150 goto out; 1151 1152 qp_attr.qp_state = IB_QPS_RTR; 1153 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1154 if (ret) 1155 goto out; 1156 1157 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); 1158 1159 if (conn_param) 1160 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 1161 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1162 out: 1163 mutex_unlock(&id_priv->qp_mutex); 1164 return ret; 1165 } 1166 1167 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 1168 struct rdma_conn_param *conn_param) 1169 { 1170 struct ib_qp_attr qp_attr; 1171 int qp_attr_mask, ret; 1172 1173 mutex_lock(&id_priv->qp_mutex); 1174 if (!id_priv->id.qp) { 1175 ret = 0; 1176 goto out; 1177 } 1178 1179 qp_attr.qp_state = IB_QPS_RTS; 1180 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1181 if (ret) 1182 goto out; 1183 1184 if (conn_param) 1185 qp_attr.max_rd_atomic = conn_param->initiator_depth; 1186 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1187 out: 1188 mutex_unlock(&id_priv->qp_mutex); 1189 return ret; 1190 } 1191 1192 static int cma_modify_qp_err(struct rdma_id_private *id_priv) 1193 { 1194 struct ib_qp_attr qp_attr; 1195 int ret; 1196 1197 mutex_lock(&id_priv->qp_mutex); 1198 if (!id_priv->id.qp) { 1199 ret = 0; 1200 goto out; 1201 } 1202 1203 qp_attr.qp_state = IB_QPS_ERR; 1204 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 1205 out: 1206 mutex_unlock(&id_priv->qp_mutex); 1207 return ret; 1208 } 1209 1210 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 1211 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 1212 { 1213 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 1214 int ret; 1215 u16 pkey; 1216 1217 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) 1218 pkey = 0xffff; 1219 else 1220 pkey = ib_addr_get_pkey(dev_addr); 1221 1222 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 1223 pkey, &qp_attr->pkey_index); 1224 if (ret) 1225 return ret; 1226 1227 qp_attr->port_num = id_priv->id.port_num; 1228 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 1229 1230 if (id_priv->id.qp_type == IB_QPT_UD) { 1231 ret = cma_set_qkey(id_priv, 0); 1232 if (ret) 1233 return ret; 1234 1235 qp_attr->qkey = id_priv->qkey; 1236 *qp_attr_mask |= IB_QP_QKEY; 1237 } else { 1238 qp_attr->qp_access_flags = 0; 1239 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 1240 } 1241 return 0; 1242 } 1243 1244 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 1245 int *qp_attr_mask) 1246 { 1247 struct rdma_id_private *id_priv; 1248 int ret = 0; 1249 1250 id_priv = container_of(id, struct rdma_id_private, id); 1251 if (rdma_cap_ib_cm(id->device, id->port_num)) { 1252 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) 1253 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 1254 else 1255 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 1256 qp_attr_mask); 1257 1258 if (qp_attr->qp_state == IB_QPS_RTR) 1259 qp_attr->rq_psn = id_priv->seq_num; 1260 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 1261 if (!id_priv->cm_id.iw) { 1262 qp_attr->qp_access_flags = 0; 1263 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 1264 } else 1265 ret = 
iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 1266 qp_attr_mask); 1267 qp_attr->port_num = id_priv->id.port_num; 1268 *qp_attr_mask |= IB_QP_PORT; 1269 } else { 1270 ret = -ENOSYS; 1271 } 1272 1273 if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) 1274 qp_attr->timeout = id_priv->timeout; 1275 1276 if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) 1277 qp_attr->min_rnr_timer = id_priv->min_rnr_timer; 1278 1279 return ret; 1280 } 1281 EXPORT_SYMBOL(rdma_init_qp_attr); 1282 1283 static inline bool cma_zero_addr(const struct sockaddr *addr) 1284 { 1285 switch (addr->sa_family) { 1286 case AF_INET: 1287 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); 1288 case AF_INET6: 1289 return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr); 1290 case AF_IB: 1291 return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr); 1292 default: 1293 return false; 1294 } 1295 } 1296 1297 static inline bool cma_loopback_addr(const struct sockaddr *addr) 1298 { 1299 switch (addr->sa_family) { 1300 case AF_INET: 1301 return ipv4_is_loopback( 1302 ((struct sockaddr_in *)addr)->sin_addr.s_addr); 1303 case AF_INET6: 1304 return ipv6_addr_loopback( 1305 &((struct sockaddr_in6 *)addr)->sin6_addr); 1306 case AF_IB: 1307 return ib_addr_loopback( 1308 &((struct sockaddr_ib *)addr)->sib_addr); 1309 default: 1310 return false; 1311 } 1312 } 1313 1314 static inline bool cma_any_addr(const struct sockaddr *addr) 1315 { 1316 return cma_zero_addr(addr) || cma_loopback_addr(addr); 1317 } 1318 1319 static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) 1320 { 1321 if (src->sa_family != dst->sa_family) 1322 return -1; 1323 1324 switch (src->sa_family) { 1325 case AF_INET: 1326 return ((struct sockaddr_in *)src)->sin_addr.s_addr != 1327 ((struct sockaddr_in *)dst)->sin_addr.s_addr; 1328 case AF_INET6: { 1329 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; 1330 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; 1331 bool link_local; 1332 1333 if (ipv6_addr_cmp(&src_addr6->sin6_addr, 1334 &dst_addr6->sin6_addr)) 1335 return 1; 1336 link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & 1337 IPV6_ADDR_LINKLOCAL; 1338 /* Link local must match their scope_ids */ 1339 return link_local ? 
(src_addr6->sin6_scope_id != 1340 dst_addr6->sin6_scope_id) : 1341 0; 1342 } 1343 1344 default: 1345 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, 1346 &((struct sockaddr_ib *) dst)->sib_addr); 1347 } 1348 } 1349 1350 static __be16 cma_port(const struct sockaddr *addr) 1351 { 1352 struct sockaddr_ib *sib; 1353 1354 switch (addr->sa_family) { 1355 case AF_INET: 1356 return ((struct sockaddr_in *) addr)->sin_port; 1357 case AF_INET6: 1358 return ((struct sockaddr_in6 *) addr)->sin6_port; 1359 case AF_IB: 1360 sib = (struct sockaddr_ib *) addr; 1361 return htons((u16) (be64_to_cpu(sib->sib_sid) & 1362 be64_to_cpu(sib->sib_sid_mask))); 1363 default: 1364 return 0; 1365 } 1366 } 1367 1368 static inline int cma_any_port(const struct sockaddr *addr) 1369 { 1370 return !cma_port(addr); 1371 } 1372 1373 static void cma_save_ib_info(struct sockaddr *src_addr, 1374 struct sockaddr *dst_addr, 1375 const struct rdma_cm_id *listen_id, 1376 const struct sa_path_rec *path) 1377 { 1378 struct sockaddr_ib *listen_ib, *ib; 1379 1380 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 1381 if (src_addr) { 1382 ib = (struct sockaddr_ib *)src_addr; 1383 ib->sib_family = AF_IB; 1384 if (path) { 1385 ib->sib_pkey = path->pkey; 1386 ib->sib_flowinfo = path->flow_label; 1387 memcpy(&ib->sib_addr, &path->sgid, 16); 1388 ib->sib_sid = path->service_id; 1389 ib->sib_scope_id = 0; 1390 } else { 1391 ib->sib_pkey = listen_ib->sib_pkey; 1392 ib->sib_flowinfo = listen_ib->sib_flowinfo; 1393 ib->sib_addr = listen_ib->sib_addr; 1394 ib->sib_sid = listen_ib->sib_sid; 1395 ib->sib_scope_id = listen_ib->sib_scope_id; 1396 } 1397 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 1398 } 1399 if (dst_addr) { 1400 ib = (struct sockaddr_ib *)dst_addr; 1401 ib->sib_family = AF_IB; 1402 if (path) { 1403 ib->sib_pkey = path->pkey; 1404 ib->sib_flowinfo = path->flow_label; 1405 memcpy(&ib->sib_addr, &path->dgid, 16); 1406 } 1407 } 1408 } 1409 1410 static void cma_save_ip4_info(struct sockaddr_in *src_addr, 1411 struct sockaddr_in *dst_addr, 1412 struct cma_hdr *hdr, 1413 __be16 local_port) 1414 { 1415 if (src_addr) { 1416 *src_addr = (struct sockaddr_in) { 1417 .sin_family = AF_INET, 1418 .sin_addr.s_addr = hdr->dst_addr.ip4.addr, 1419 .sin_port = local_port, 1420 }; 1421 } 1422 1423 if (dst_addr) { 1424 *dst_addr = (struct sockaddr_in) { 1425 .sin_family = AF_INET, 1426 .sin_addr.s_addr = hdr->src_addr.ip4.addr, 1427 .sin_port = hdr->port, 1428 }; 1429 } 1430 } 1431 1432 static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, 1433 struct sockaddr_in6 *dst_addr, 1434 struct cma_hdr *hdr, 1435 __be16 local_port) 1436 { 1437 if (src_addr) { 1438 *src_addr = (struct sockaddr_in6) { 1439 .sin6_family = AF_INET6, 1440 .sin6_addr = hdr->dst_addr.ip6, 1441 .sin6_port = local_port, 1442 }; 1443 } 1444 1445 if (dst_addr) { 1446 *dst_addr = (struct sockaddr_in6) { 1447 .sin6_family = AF_INET6, 1448 .sin6_addr = hdr->src_addr.ip6, 1449 .sin6_port = hdr->port, 1450 }; 1451 } 1452 } 1453 1454 static u16 cma_port_from_service_id(__be64 service_id) 1455 { 1456 return (u16)be64_to_cpu(service_id); 1457 } 1458 1459 static int cma_save_ip_info(struct sockaddr *src_addr, 1460 struct sockaddr *dst_addr, 1461 const struct ib_cm_event *ib_event, 1462 __be64 service_id) 1463 { 1464 struct cma_hdr *hdr; 1465 __be16 port; 1466 1467 hdr = ib_event->private_data; 1468 if (hdr->cma_version != CMA_VERSION) 1469 return -EINVAL; 1470 1471 port = htons(cma_port_from_service_id(service_id)); 1472 1473 switch 
(cma_get_ip_ver(hdr)) { 1474 case 4: 1475 cma_save_ip4_info((struct sockaddr_in *)src_addr, 1476 (struct sockaddr_in *)dst_addr, hdr, port); 1477 break; 1478 case 6: 1479 cma_save_ip6_info((struct sockaddr_in6 *)src_addr, 1480 (struct sockaddr_in6 *)dst_addr, hdr, port); 1481 break; 1482 default: 1483 return -EAFNOSUPPORT; 1484 } 1485 1486 return 0; 1487 } 1488 1489 static int cma_save_net_info(struct sockaddr *src_addr, 1490 struct sockaddr *dst_addr, 1491 const struct rdma_cm_id *listen_id, 1492 const struct ib_cm_event *ib_event, 1493 sa_family_t sa_family, __be64 service_id) 1494 { 1495 if (sa_family == AF_IB) { 1496 if (ib_event->event == IB_CM_REQ_RECEIVED) 1497 cma_save_ib_info(src_addr, dst_addr, listen_id, 1498 ib_event->param.req_rcvd.primary_path); 1499 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 1500 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); 1501 return 0; 1502 } 1503 1504 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); 1505 } 1506 1507 static int cma_save_req_info(const struct ib_cm_event *ib_event, 1508 struct cma_req_info *req) 1509 { 1510 const struct ib_cm_req_event_param *req_param = 1511 &ib_event->param.req_rcvd; 1512 const struct ib_cm_sidr_req_event_param *sidr_param = 1513 &ib_event->param.sidr_req_rcvd; 1514 1515 switch (ib_event->event) { 1516 case IB_CM_REQ_RECEIVED: 1517 req->device = req_param->listen_id->device; 1518 req->port = req_param->port; 1519 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1520 sizeof(req->local_gid)); 1521 req->has_gid = true; 1522 req->service_id = req_param->primary_path->service_id; 1523 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1524 if (req->pkey != req_param->bth_pkey) 1525 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" 1526 "RDMA CMA: in the future this may cause the request to be dropped\n", 1527 req_param->bth_pkey, req->pkey); 1528 break; 1529 case IB_CM_SIDR_REQ_RECEIVED: 1530 req->device = sidr_param->listen_id->device; 1531 req->port = sidr_param->port; 1532 req->has_gid = false; 1533 req->service_id = sidr_param->service_id; 1534 req->pkey = sidr_param->pkey; 1535 if (req->pkey != sidr_param->bth_pkey) 1536 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" 1537 "RDMA CMA: in the future this may cause the request to be dropped\n", 1538 sidr_param->bth_pkey, req->pkey); 1539 break; 1540 default: 1541 return -EINVAL; 1542 } 1543 1544 return 0; 1545 } 1546 1547 static bool validate_ipv4_net_dev(struct net_device *net_dev, 1548 const struct sockaddr_in *dst_addr, 1549 const struct sockaddr_in *src_addr) 1550 { 1551 __be32 daddr = dst_addr->sin_addr.s_addr, 1552 saddr = src_addr->sin_addr.s_addr; 1553 struct fib_result res; 1554 struct flowi4 fl4; 1555 int err; 1556 bool ret; 1557 1558 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1559 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || 1560 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || 1561 ipv4_is_loopback(saddr)) 1562 return false; 1563 1564 memset(&fl4, 0, sizeof(fl4)); 1565 fl4.flowi4_oif = net_dev->ifindex; 1566 fl4.daddr = daddr; 1567 fl4.saddr = saddr; 1568 1569 rcu_read_lock(); 1570 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1571 ret = err == 0 && FIB_RES_DEV(res) == net_dev; 1572 rcu_read_unlock(); 1573 1574 return ret; 1575 } 1576 1577 static bool validate_ipv6_net_dev(struct net_device *net_dev, 1578 const struct sockaddr_in6 *dst_addr, 1579 const struct sockaddr_in6 *src_addr) 
1580 { 1581 #if IS_ENABLED(CONFIG_IPV6) 1582 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & 1583 IPV6_ADDR_LINKLOCAL; 1584 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, 1585 &src_addr->sin6_addr, net_dev->ifindex, 1586 NULL, strict); 1587 bool ret; 1588 1589 if (!rt) 1590 return false; 1591 1592 ret = rt->rt6i_idev->dev == net_dev; 1593 ip6_rt_put(rt); 1594 1595 return ret; 1596 #else 1597 return false; 1598 #endif 1599 } 1600 1601 static bool validate_net_dev(struct net_device *net_dev, 1602 const struct sockaddr *daddr, 1603 const struct sockaddr *saddr) 1604 { 1605 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; 1606 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; 1607 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; 1608 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; 1609 1610 switch (daddr->sa_family) { 1611 case AF_INET: 1612 return saddr->sa_family == AF_INET && 1613 validate_ipv4_net_dev(net_dev, daddr4, saddr4); 1614 1615 case AF_INET6: 1616 return saddr->sa_family == AF_INET6 && 1617 validate_ipv6_net_dev(net_dev, daddr6, saddr6); 1618 1619 default: 1620 return false; 1621 } 1622 } 1623 1624 static struct net_device * 1625 roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) 1626 { 1627 const struct ib_gid_attr *sgid_attr = NULL; 1628 struct net_device *ndev; 1629 1630 if (ib_event->event == IB_CM_REQ_RECEIVED) 1631 sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; 1632 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 1633 sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; 1634 1635 if (!sgid_attr) 1636 return NULL; 1637 1638 rcu_read_lock(); 1639 ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr); 1640 if (IS_ERR(ndev)) 1641 ndev = NULL; 1642 else 1643 dev_hold(ndev); 1644 rcu_read_unlock(); 1645 return ndev; 1646 } 1647 1648 static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, 1649 struct cma_req_info *req) 1650 { 1651 struct sockaddr *listen_addr = 1652 (struct sockaddr *)&req->listen_addr_storage; 1653 struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; 1654 struct net_device *net_dev; 1655 const union ib_gid *gid = req->has_gid ? 
						  &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	return net_dev;
}

static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const u32 port_num = id->port_num ?: rdma_start_port(device);

	return rdma_protocol_roce(device, port_num);
}

static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{
	const struct sockaddr *daddr =
			(const struct sockaddr *)&req->listen_addr_storage;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;

	/* Returns true if the req is for IPv6 link local */
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);

	/*
	 * If the request is not for IPv6 link local, allow matching the
	 * request to any netdevice of the single- or multi-port rdma device.
	 */
	if (!cma_is_req_ipv6_ll(req))
		return true;
	/*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
1755 */ 1756 if (net_eq(dev_net(net_dev), addr->dev_addr.net) && 1757 (!!addr->dev_addr.bound_dev_if == 1758 (addr->dev_addr.bound_dev_if == net_dev->ifindex))) 1759 return true; 1760 else 1761 return false; 1762 } 1763 1764 static struct rdma_id_private *cma_find_listener( 1765 const struct rdma_bind_list *bind_list, 1766 const struct ib_cm_id *cm_id, 1767 const struct ib_cm_event *ib_event, 1768 const struct cma_req_info *req, 1769 const struct net_device *net_dev) 1770 { 1771 struct rdma_id_private *id_priv, *id_priv_dev; 1772 1773 lockdep_assert_held(&lock); 1774 1775 if (!bind_list) 1776 return ERR_PTR(-EINVAL); 1777 1778 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1779 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1780 if (id_priv->id.device == cm_id->device && 1781 cma_match_net_dev(&id_priv->id, net_dev, req)) 1782 return id_priv; 1783 list_for_each_entry(id_priv_dev, 1784 &id_priv->listen_list, 1785 listen_item) { 1786 if (id_priv_dev->id.device == cm_id->device && 1787 cma_match_net_dev(&id_priv_dev->id, 1788 net_dev, req)) 1789 return id_priv_dev; 1790 } 1791 } 1792 } 1793 1794 return ERR_PTR(-EINVAL); 1795 } 1796 1797 static struct rdma_id_private * 1798 cma_ib_id_from_event(struct ib_cm_id *cm_id, 1799 const struct ib_cm_event *ib_event, 1800 struct cma_req_info *req, 1801 struct net_device **net_dev) 1802 { 1803 struct rdma_bind_list *bind_list; 1804 struct rdma_id_private *id_priv; 1805 int err; 1806 1807 err = cma_save_req_info(ib_event, req); 1808 if (err) 1809 return ERR_PTR(err); 1810 1811 *net_dev = cma_get_net_dev(ib_event, req); 1812 if (IS_ERR(*net_dev)) { 1813 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { 1814 /* Assuming the protocol is AF_IB */ 1815 *net_dev = NULL; 1816 } else { 1817 return ERR_CAST(*net_dev); 1818 } 1819 } 1820 1821 mutex_lock(&lock); 1822 /* 1823 * Net namespace might be getting deleted while route lookup, 1824 * cm_id lookup is in progress. Therefore, perform netdevice 1825 * validation, cm_id lookup under rcu lock. 1826 * RCU lock along with netdevice state check, synchronizes with 1827 * netdevice migrating to different net namespace and also avoids 1828 * case where net namespace doesn't get deleted while lookup is in 1829 * progress. 1830 * If the device state is not IFF_UP, its properties such as ifindex 1831 * and nd_net cannot be trusted to remain valid without rcu lock. 1832 * net/core/dev.c change_net_namespace() ensures to synchronize with 1833 * ongoing operations on net device after device is closed using 1834 * synchronize_net(). 1835 */ 1836 rcu_read_lock(); 1837 if (*net_dev) { 1838 /* 1839 * If netdevice is down, it is likely that it is administratively 1840 * down or it might be migrating to different namespace. 1841 * In that case avoid further processing, as the net namespace 1842 * or ifindex may change. 1843 */ 1844 if (((*net_dev)->flags & IFF_UP) == 0) { 1845 id_priv = ERR_PTR(-EHOSTUNREACH); 1846 goto err; 1847 } 1848 1849 if (!validate_net_dev(*net_dev, 1850 (struct sockaddr *)&req->src_addr_storage, 1851 (struct sockaddr *)&req->listen_addr_storage)) { 1852 id_priv = ERR_PTR(-EHOSTUNREACH); 1853 goto err; 1854 } 1855 } 1856 1857 bind_list = cma_ps_find(*net_dev ? 
dev_net(*net_dev) : &init_net, 1858 rdma_ps_from_service_id(req->service_id), 1859 cma_port_from_service_id(req->service_id)); 1860 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); 1861 err: 1862 rcu_read_unlock(); 1863 mutex_unlock(&lock); 1864 if (IS_ERR(id_priv) && *net_dev) { 1865 dev_put(*net_dev); 1866 *net_dev = NULL; 1867 } 1868 return id_priv; 1869 } 1870 1871 static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) 1872 { 1873 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); 1874 } 1875 1876 static void cma_cancel_route(struct rdma_id_private *id_priv) 1877 { 1878 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { 1879 if (id_priv->query) 1880 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 1881 } 1882 } 1883 1884 static void _cma_cancel_listens(struct rdma_id_private *id_priv) 1885 { 1886 struct rdma_id_private *dev_id_priv; 1887 1888 lockdep_assert_held(&lock); 1889 1890 /* 1891 * Remove from listen_any_list to prevent added devices from spawning 1892 * additional listen requests. 1893 */ 1894 list_del_init(&id_priv->listen_any_item); 1895 1896 while (!list_empty(&id_priv->listen_list)) { 1897 dev_id_priv = 1898 list_first_entry(&id_priv->listen_list, 1899 struct rdma_id_private, listen_item); 1900 /* sync with device removal to avoid duplicate destruction */ 1901 list_del_init(&dev_id_priv->device_item); 1902 list_del_init(&dev_id_priv->listen_item); 1903 mutex_unlock(&lock); 1904 1905 rdma_destroy_id(&dev_id_priv->id); 1906 mutex_lock(&lock); 1907 } 1908 } 1909 1910 static void cma_cancel_listens(struct rdma_id_private *id_priv) 1911 { 1912 mutex_lock(&lock); 1913 _cma_cancel_listens(id_priv); 1914 mutex_unlock(&lock); 1915 } 1916 1917 static void cma_cancel_operation(struct rdma_id_private *id_priv, 1918 enum rdma_cm_state state) 1919 { 1920 switch (state) { 1921 case RDMA_CM_ADDR_QUERY: 1922 /* 1923 * We can avoid doing the rdma_addr_cancel() based on state, 1924 * only RDMA_CM_ADDR_QUERY has a work that could still execute. 1925 * Notice that the addr_handler work could still be exiting 1926 * outside this state, however due to the interaction with the 1927 * handler_mutex the work is guaranteed not to touch id_priv 1928 * during exit. 
1929 */ 1930 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1931 break; 1932 case RDMA_CM_ROUTE_QUERY: 1933 cma_cancel_route(id_priv); 1934 break; 1935 case RDMA_CM_LISTEN: 1936 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) 1937 cma_cancel_listens(id_priv); 1938 break; 1939 default: 1940 break; 1941 } 1942 } 1943 1944 static void cma_release_port(struct rdma_id_private *id_priv) 1945 { 1946 struct rdma_bind_list *bind_list = id_priv->bind_list; 1947 struct net *net = id_priv->id.route.addr.dev_addr.net; 1948 1949 if (!bind_list) 1950 return; 1951 1952 mutex_lock(&lock); 1953 hlist_del(&id_priv->node); 1954 if (hlist_empty(&bind_list->owners)) { 1955 cma_ps_remove(net, bind_list->ps, bind_list->port); 1956 kfree(bind_list); 1957 } 1958 mutex_unlock(&lock); 1959 } 1960 1961 static void destroy_mc(struct rdma_id_private *id_priv, 1962 struct cma_multicast *mc) 1963 { 1964 bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 1965 1966 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) 1967 ib_sa_free_multicast(mc->sa_mc); 1968 1969 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { 1970 struct rdma_dev_addr *dev_addr = 1971 &id_priv->id.route.addr.dev_addr; 1972 struct net_device *ndev = NULL; 1973 1974 if (dev_addr->bound_dev_if) 1975 ndev = dev_get_by_index(dev_addr->net, 1976 dev_addr->bound_dev_if); 1977 if (ndev && !send_only) { 1978 enum ib_gid_type gid_type; 1979 union ib_gid mgid; 1980 1981 gid_type = id_priv->cma_dev->default_gid_type 1982 [id_priv->id.port_num - 1983 rdma_start_port( 1984 id_priv->cma_dev->device)]; 1985 cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, 1986 gid_type); 1987 cma_igmp_send(ndev, &mgid, false); 1988 } 1989 dev_put(ndev); 1990 1991 cancel_work_sync(&mc->iboe_join.work); 1992 } 1993 kfree(mc); 1994 } 1995 1996 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 1997 { 1998 struct cma_multicast *mc; 1999 2000 while (!list_empty(&id_priv->mc_list)) { 2001 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, 2002 list); 2003 list_del(&mc->list); 2004 destroy_mc(id_priv, mc); 2005 } 2006 } 2007 2008 static void _destroy_id(struct rdma_id_private *id_priv, 2009 enum rdma_cm_state state) 2010 { 2011 cma_cancel_operation(id_priv, state); 2012 2013 rdma_restrack_del(&id_priv->res); 2014 cma_remove_id_from_tree(id_priv); 2015 if (id_priv->cma_dev) { 2016 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 2017 if (id_priv->cm_id.ib) 2018 ib_destroy_cm_id(id_priv->cm_id.ib); 2019 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 2020 if (id_priv->cm_id.iw) 2021 iw_destroy_cm_id(id_priv->cm_id.iw); 2022 } 2023 cma_leave_mc_groups(id_priv); 2024 cma_release_dev(id_priv); 2025 } 2026 2027 cma_release_port(id_priv); 2028 cma_id_put(id_priv); 2029 wait_for_completion(&id_priv->comp); 2030 2031 if (id_priv->internal_id) 2032 cma_id_put(id_priv->id.context); 2033 2034 kfree(id_priv->id.route.path_rec); 2035 kfree(id_priv->id.route.path_rec_inbound); 2036 kfree(id_priv->id.route.path_rec_outbound); 2037 2038 put_net(id_priv->id.route.addr.dev_addr.net); 2039 kfree(id_priv); 2040 } 2041 2042 /* 2043 * destroy an ID from within the handler_mutex. This ensures that no other 2044 * handlers can start running concurrently. 
2045 */ 2046 static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) 2047 __releases(&id_priv->handler_mutex) 2048 { 2049 enum rdma_cm_state state; 2050 unsigned long flags; 2051 2052 trace_cm_id_destroy(id_priv); 2053 2054 /* 2055 * Setting the state to destroyed under the handler mutex provides a 2056 * fence against calling handler callbacks. If this is invoked due to 2057 * the failure of a handler callback then it guarantees that no future 2058 * handlers will be called. 2059 */ 2060 lockdep_assert_held(&id_priv->handler_mutex); 2061 spin_lock_irqsave(&id_priv->lock, flags); 2062 state = id_priv->state; 2063 id_priv->state = RDMA_CM_DESTROYING; 2064 spin_unlock_irqrestore(&id_priv->lock, flags); 2065 mutex_unlock(&id_priv->handler_mutex); 2066 _destroy_id(id_priv, state); 2067 } 2068 2069 void rdma_destroy_id(struct rdma_cm_id *id) 2070 { 2071 struct rdma_id_private *id_priv = 2072 container_of(id, struct rdma_id_private, id); 2073 2074 mutex_lock(&id_priv->handler_mutex); 2075 destroy_id_handler_unlock(id_priv); 2076 } 2077 EXPORT_SYMBOL(rdma_destroy_id); 2078 2079 static int cma_rep_recv(struct rdma_id_private *id_priv) 2080 { 2081 int ret; 2082 2083 ret = cma_modify_qp_rtr(id_priv, NULL); 2084 if (ret) 2085 goto reject; 2086 2087 ret = cma_modify_qp_rts(id_priv, NULL); 2088 if (ret) 2089 goto reject; 2090 2091 trace_cm_send_rtu(id_priv); 2092 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 2093 if (ret) 2094 goto reject; 2095 2096 return 0; 2097 reject: 2098 pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret); 2099 cma_modify_qp_err(id_priv); 2100 trace_cm_send_rej(id_priv); 2101 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 2102 NULL, 0, NULL, 0); 2103 return ret; 2104 } 2105 2106 static void cma_set_rep_event_data(struct rdma_cm_event *event, 2107 const struct ib_cm_rep_event_param *rep_data, 2108 void *private_data) 2109 { 2110 event->param.conn.private_data = private_data; 2111 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 2112 event->param.conn.responder_resources = rep_data->responder_resources; 2113 event->param.conn.initiator_depth = rep_data->initiator_depth; 2114 event->param.conn.flow_control = rep_data->flow_control; 2115 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 2116 event->param.conn.srq = rep_data->srq; 2117 event->param.conn.qp_num = rep_data->remote_qpn; 2118 2119 event->ece.vendor_id = rep_data->ece.vendor_id; 2120 event->ece.attr_mod = rep_data->ece.attr_mod; 2121 } 2122 2123 static int cma_cm_event_handler(struct rdma_id_private *id_priv, 2124 struct rdma_cm_event *event) 2125 { 2126 int ret; 2127 2128 lockdep_assert_held(&id_priv->handler_mutex); 2129 2130 trace_cm_event_handler(id_priv, event); 2131 ret = id_priv->id.event_handler(&id_priv->id, event); 2132 trace_cm_event_done(id_priv, event, ret); 2133 return ret; 2134 } 2135 2136 static int cma_ib_handler(struct ib_cm_id *cm_id, 2137 const struct ib_cm_event *ib_event) 2138 { 2139 struct rdma_id_private *id_priv = cm_id->context; 2140 struct rdma_cm_event event = {}; 2141 enum rdma_cm_state state; 2142 int ret; 2143 2144 mutex_lock(&id_priv->handler_mutex); 2145 state = READ_ONCE(id_priv->state); 2146 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 2147 state != RDMA_CM_CONNECT) || 2148 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 2149 state != RDMA_CM_DISCONNECT)) 2150 goto out; 2151 2152 switch (ib_event->event) { 2153 case IB_CM_REQ_ERROR: 2154 case IB_CM_REP_ERROR: 2155 event.event =
RDMA_CM_EVENT_UNREACHABLE; 2156 event.status = -ETIMEDOUT; 2157 break; 2158 case IB_CM_REP_RECEIVED: 2159 if (state == RDMA_CM_CONNECT && 2160 (id_priv->id.qp_type != IB_QPT_UD)) { 2161 trace_cm_send_mra(id_priv); 2162 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2163 } 2164 if (id_priv->id.qp) { 2165 event.status = cma_rep_recv(id_priv); 2166 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 2167 RDMA_CM_EVENT_ESTABLISHED; 2168 } else { 2169 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 2170 } 2171 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 2172 ib_event->private_data); 2173 break; 2174 case IB_CM_RTU_RECEIVED: 2175 case IB_CM_USER_ESTABLISHED: 2176 event.event = RDMA_CM_EVENT_ESTABLISHED; 2177 break; 2178 case IB_CM_DREQ_ERROR: 2179 event.status = -ETIMEDOUT; 2180 fallthrough; 2181 case IB_CM_DREQ_RECEIVED: 2182 case IB_CM_DREP_RECEIVED: 2183 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 2184 RDMA_CM_DISCONNECT)) 2185 goto out; 2186 event.event = RDMA_CM_EVENT_DISCONNECTED; 2187 break; 2188 case IB_CM_TIMEWAIT_EXIT: 2189 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 2190 break; 2191 case IB_CM_MRA_RECEIVED: 2192 /* ignore event */ 2193 goto out; 2194 case IB_CM_REJ_RECEIVED: 2195 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, 2196 ib_event->param.rej_rcvd.reason)); 2197 cma_modify_qp_err(id_priv); 2198 event.status = ib_event->param.rej_rcvd.reason; 2199 event.event = RDMA_CM_EVENT_REJECTED; 2200 event.param.conn.private_data = ib_event->private_data; 2201 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 2202 break; 2203 default: 2204 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 2205 ib_event->event); 2206 goto out; 2207 } 2208 2209 ret = cma_cm_event_handler(id_priv, &event); 2210 if (ret) { 2211 /* Destroy the CM ID by returning a non-zero value. */ 2212 id_priv->cm_id.ib = NULL; 2213 destroy_id_handler_unlock(id_priv); 2214 return ret; 2215 } 2216 out: 2217 mutex_unlock(&id_priv->handler_mutex); 2218 return 0; 2219 } 2220 2221 static struct rdma_id_private * 2222 cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, 2223 const struct ib_cm_event *ib_event, 2224 struct net_device *net_dev) 2225 { 2226 struct rdma_id_private *listen_id_priv; 2227 struct rdma_id_private *id_priv; 2228 struct rdma_cm_id *id; 2229 struct rdma_route *rt; 2230 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2231 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 2232 const __be64 service_id = 2233 ib_event->param.req_rcvd.primary_path->service_id; 2234 int ret; 2235 2236 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2237 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, 2238 listen_id->event_handler, listen_id->context, 2239 listen_id->ps, 2240 ib_event->param.req_rcvd.qp_type, 2241 listen_id_priv); 2242 if (IS_ERR(id_priv)) 2243 return NULL; 2244 2245 id = &id_priv->id; 2246 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2247 (struct sockaddr *)&id->route.addr.dst_addr, 2248 listen_id, ib_event, ss_family, service_id)) 2249 goto err; 2250 2251 rt = &id->route; 2252 rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 
2 : 1; 2253 rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, 2254 sizeof(*rt->path_rec), GFP_KERNEL); 2255 if (!rt->path_rec) 2256 goto err; 2257 2258 rt->path_rec[0] = *path; 2259 if (rt->num_pri_alt_paths == 2) 2260 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 2261 2262 if (net_dev) { 2263 rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); 2264 } else { 2265 if (!cma_protocol_roce(listen_id) && 2266 cma_any_addr(cma_src_addr(id_priv))) { 2267 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 2268 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 2269 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 2270 } else if (!cma_any_addr(cma_src_addr(id_priv))) { 2271 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 2272 if (ret) 2273 goto err; 2274 } 2275 } 2276 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 2277 2278 id_priv->state = RDMA_CM_CONNECT; 2279 return id_priv; 2280 2281 err: 2282 rdma_destroy_id(id); 2283 return NULL; 2284 } 2285 2286 static struct rdma_id_private * 2287 cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, 2288 const struct ib_cm_event *ib_event, 2289 struct net_device *net_dev) 2290 { 2291 const struct rdma_id_private *listen_id_priv; 2292 struct rdma_id_private *id_priv; 2293 struct rdma_cm_id *id; 2294 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2295 struct net *net = listen_id->route.addr.dev_addr.net; 2296 int ret; 2297 2298 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2299 id_priv = __rdma_create_id(net, listen_id->event_handler, 2300 listen_id->context, listen_id->ps, IB_QPT_UD, 2301 listen_id_priv); 2302 if (IS_ERR(id_priv)) 2303 return NULL; 2304 2305 id = &id_priv->id; 2306 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2307 (struct sockaddr *)&id->route.addr.dst_addr, 2308 listen_id, ib_event, ss_family, 2309 ib_event->param.sidr_req_rcvd.service_id)) 2310 goto err; 2311 2312 if (net_dev) { 2313 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); 2314 } else { 2315 if (!cma_any_addr(cma_src_addr(id_priv))) { 2316 ret = cma_translate_addr(cma_src_addr(id_priv), 2317 &id->route.addr.dev_addr); 2318 if (ret) 2319 goto err; 2320 } 2321 } 2322 2323 id_priv->state = RDMA_CM_CONNECT; 2324 return id_priv; 2325 err: 2326 rdma_destroy_id(id); 2327 return NULL; 2328 } 2329 2330 static void cma_set_req_event_data(struct rdma_cm_event *event, 2331 const struct ib_cm_req_event_param *req_data, 2332 void *private_data, int offset) 2333 { 2334 event->param.conn.private_data = private_data + offset; 2335 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 2336 event->param.conn.responder_resources = req_data->responder_resources; 2337 event->param.conn.initiator_depth = req_data->initiator_depth; 2338 event->param.conn.flow_control = req_data->flow_control; 2339 event->param.conn.retry_count = req_data->retry_count; 2340 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 2341 event->param.conn.srq = req_data->srq; 2342 event->param.conn.qp_num = req_data->remote_qpn; 2343 2344 event->ece.vendor_id = req_data->ece.vendor_id; 2345 event->ece.attr_mod = req_data->ece.attr_mod; 2346 } 2347 2348 static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, 2349 const struct ib_cm_event *ib_event) 2350 { 2351 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 2352 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 2353 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 2354 
(id->qp_type == IB_QPT_UD)) || 2355 (!id->qp_type)); 2356 } 2357 2358 static int cma_ib_req_handler(struct ib_cm_id *cm_id, 2359 const struct ib_cm_event *ib_event) 2360 { 2361 struct rdma_id_private *listen_id, *conn_id = NULL; 2362 struct rdma_cm_event event = {}; 2363 struct cma_req_info req = {}; 2364 struct net_device *net_dev; 2365 u8 offset; 2366 int ret; 2367 2368 listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); 2369 if (IS_ERR(listen_id)) 2370 return PTR_ERR(listen_id); 2371 2372 trace_cm_req_handler(listen_id, ib_event->event); 2373 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { 2374 ret = -EINVAL; 2375 goto net_dev_put; 2376 } 2377 2378 mutex_lock(&listen_id->handler_mutex); 2379 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { 2380 ret = -ECONNABORTED; 2381 goto err_unlock; 2382 } 2383 2384 offset = cma_user_data_offset(listen_id); 2385 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2386 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 2387 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); 2388 event.param.ud.private_data = ib_event->private_data + offset; 2389 event.param.ud.private_data_len = 2390 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 2391 } else { 2392 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); 2393 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 2394 ib_event->private_data, offset); 2395 } 2396 if (!conn_id) { 2397 ret = -ENOMEM; 2398 goto err_unlock; 2399 } 2400 2401 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2402 ret = cma_ib_acquire_dev(conn_id, listen_id, &req); 2403 if (ret) { 2404 destroy_id_handler_unlock(conn_id); 2405 goto err_unlock; 2406 } 2407 2408 conn_id->cm_id.ib = cm_id; 2409 cm_id->context = conn_id; 2410 cm_id->cm_handler = cma_ib_handler; 2411 2412 ret = cma_cm_event_handler(conn_id, &event); 2413 if (ret) { 2414 /* Destroy the CM ID by returning a non-zero value. 
*/ 2415 conn_id->cm_id.ib = NULL; 2416 mutex_unlock(&listen_id->handler_mutex); 2417 destroy_id_handler_unlock(conn_id); 2418 goto net_dev_put; 2419 } 2420 2421 if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && 2422 conn_id->id.qp_type != IB_QPT_UD) { 2423 trace_cm_send_mra(cm_id->context); 2424 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2425 } 2426 mutex_unlock(&conn_id->handler_mutex); 2427 2428 err_unlock: 2429 mutex_unlock(&listen_id->handler_mutex); 2430 2431 net_dev_put: 2432 dev_put(net_dev); 2433 2434 return ret; 2435 } 2436 2437 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2438 { 2439 if (addr->sa_family == AF_IB) 2440 return ((struct sockaddr_ib *) addr)->sib_sid; 2441 2442 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2443 } 2444 EXPORT_SYMBOL(rdma_get_service_id); 2445 2446 void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, 2447 union ib_gid *dgid) 2448 { 2449 struct rdma_addr *addr = &cm_id->route.addr; 2450 2451 if (!cm_id->device) { 2452 if (sgid) 2453 memset(sgid, 0, sizeof(*sgid)); 2454 if (dgid) 2455 memset(dgid, 0, sizeof(*dgid)); 2456 return; 2457 } 2458 2459 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { 2460 if (sgid) 2461 rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); 2462 if (dgid) 2463 rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); 2464 } else { 2465 if (sgid) 2466 rdma_addr_get_sgid(&addr->dev_addr, sgid); 2467 if (dgid) 2468 rdma_addr_get_dgid(&addr->dev_addr, dgid); 2469 } 2470 } 2471 EXPORT_SYMBOL(rdma_read_gids); 2472 2473 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2474 { 2475 struct rdma_id_private *id_priv = iw_id->context; 2476 struct rdma_cm_event event = {}; 2477 int ret = 0; 2478 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2479 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2480 2481 mutex_lock(&id_priv->handler_mutex); 2482 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 2483 goto out; 2484 2485 switch (iw_event->event) { 2486 case IW_CM_EVENT_CLOSE: 2487 event.event = RDMA_CM_EVENT_DISCONNECTED; 2488 break; 2489 case IW_CM_EVENT_CONNECT_REPLY: 2490 memcpy(cma_src_addr(id_priv), laddr, 2491 rdma_addr_size(laddr)); 2492 memcpy(cma_dst_addr(id_priv), raddr, 2493 rdma_addr_size(raddr)); 2494 switch (iw_event->status) { 2495 case 0: 2496 event.event = RDMA_CM_EVENT_ESTABLISHED; 2497 event.param.conn.initiator_depth = iw_event->ird; 2498 event.param.conn.responder_resources = iw_event->ord; 2499 break; 2500 case -ECONNRESET: 2501 case -ECONNREFUSED: 2502 event.event = RDMA_CM_EVENT_REJECTED; 2503 break; 2504 case -ETIMEDOUT: 2505 event.event = RDMA_CM_EVENT_UNREACHABLE; 2506 break; 2507 default: 2508 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2509 break; 2510 } 2511 break; 2512 case IW_CM_EVENT_ESTABLISHED: 2513 event.event = RDMA_CM_EVENT_ESTABLISHED; 2514 event.param.conn.initiator_depth = iw_event->ird; 2515 event.param.conn.responder_resources = iw_event->ord; 2516 break; 2517 default: 2518 goto out; 2519 } 2520 2521 event.status = iw_event->status; 2522 event.param.conn.private_data = iw_event->private_data; 2523 event.param.conn.private_data_len = iw_event->private_data_len; 2524 ret = cma_cm_event_handler(id_priv, &event); 2525 if (ret) { 2526 /* Destroy the CM ID by returning a non-zero value. 
*/ 2527 id_priv->cm_id.iw = NULL; 2528 destroy_id_handler_unlock(id_priv); 2529 return ret; 2530 } 2531 2532 out: 2533 mutex_unlock(&id_priv->handler_mutex); 2534 return ret; 2535 } 2536 2537 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2538 struct iw_cm_event *iw_event) 2539 { 2540 struct rdma_id_private *listen_id, *conn_id; 2541 struct rdma_cm_event event = {}; 2542 int ret = -ECONNABORTED; 2543 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2544 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2545 2546 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2547 event.param.conn.private_data = iw_event->private_data; 2548 event.param.conn.private_data_len = iw_event->private_data_len; 2549 event.param.conn.initiator_depth = iw_event->ird; 2550 event.param.conn.responder_resources = iw_event->ord; 2551 2552 listen_id = cm_id->context; 2553 2554 mutex_lock(&listen_id->handler_mutex); 2555 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) 2556 goto out; 2557 2558 /* Create a new RDMA id for the new IW CM ID */ 2559 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2560 listen_id->id.event_handler, 2561 listen_id->id.context, RDMA_PS_TCP, 2562 IB_QPT_RC, listen_id); 2563 if (IS_ERR(conn_id)) { 2564 ret = -ENOMEM; 2565 goto out; 2566 } 2567 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2568 conn_id->state = RDMA_CM_CONNECT; 2569 2570 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); 2571 if (ret) { 2572 mutex_unlock(&listen_id->handler_mutex); 2573 destroy_id_handler_unlock(conn_id); 2574 return ret; 2575 } 2576 2577 ret = cma_iw_acquire_dev(conn_id, listen_id); 2578 if (ret) { 2579 mutex_unlock(&listen_id->handler_mutex); 2580 destroy_id_handler_unlock(conn_id); 2581 return ret; 2582 } 2583 2584 conn_id->cm_id.iw = cm_id; 2585 cm_id->context = conn_id; 2586 cm_id->cm_handler = cma_iw_handler; 2587 2588 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2589 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2590 2591 ret = cma_cm_event_handler(conn_id, &event); 2592 if (ret) { 2593 /* User wants to destroy the CM ID */ 2594 conn_id->cm_id.iw = NULL; 2595 mutex_unlock(&listen_id->handler_mutex); 2596 destroy_id_handler_unlock(conn_id); 2597 return ret; 2598 } 2599 2600 mutex_unlock(&conn_id->handler_mutex); 2601 2602 out: 2603 mutex_unlock(&listen_id->handler_mutex); 2604 return ret; 2605 } 2606 2607 static int cma_ib_listen(struct rdma_id_private *id_priv) 2608 { 2609 struct sockaddr *addr; 2610 struct ib_cm_id *id; 2611 __be64 svc_id; 2612 2613 addr = cma_src_addr(id_priv); 2614 svc_id = rdma_get_service_id(&id_priv->id, addr); 2615 id = ib_cm_insert_listen(id_priv->id.device, 2616 cma_ib_req_handler, svc_id); 2617 if (IS_ERR(id)) 2618 return PTR_ERR(id); 2619 id_priv->cm_id.ib = id; 2620 2621 return 0; 2622 } 2623 2624 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2625 { 2626 int ret; 2627 struct iw_cm_id *id; 2628 2629 id = iw_create_cm_id(id_priv->id.device, 2630 iw_conn_req_handler, 2631 id_priv); 2632 if (IS_ERR(id)) 2633 return PTR_ERR(id); 2634 2635 mutex_lock(&id_priv->qp_mutex); 2636 id->tos = id_priv->tos; 2637 id->tos_set = id_priv->tos_set; 2638 mutex_unlock(&id_priv->qp_mutex); 2639 id->afonly = id_priv->afonly; 2640 id_priv->cm_id.iw = id; 2641 2642 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2643 rdma_addr_size(cma_src_addr(id_priv))); 2644 2645 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2646 2647 if (ret) { 2648 
iw_destroy_cm_id(id_priv->cm_id.iw); 2649 id_priv->cm_id.iw = NULL; 2650 } 2651 2652 return ret; 2653 } 2654 2655 static int cma_listen_handler(struct rdma_cm_id *id, 2656 struct rdma_cm_event *event) 2657 { 2658 struct rdma_id_private *id_priv = id->context; 2659 2660 /* Listening IDs are always destroyed on removal */ 2661 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) 2662 return -1; 2663 2664 id->context = id_priv->id.context; 2665 id->event_handler = id_priv->id.event_handler; 2666 trace_cm_event_handler(id_priv, event); 2667 return id_priv->id.event_handler(id, event); 2668 } 2669 2670 static int cma_listen_on_dev(struct rdma_id_private *id_priv, 2671 struct cma_device *cma_dev, 2672 struct rdma_id_private **to_destroy) 2673 { 2674 struct rdma_id_private *dev_id_priv; 2675 struct net *net = id_priv->id.route.addr.dev_addr.net; 2676 int ret; 2677 2678 lockdep_assert_held(&lock); 2679 2680 *to_destroy = NULL; 2681 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2682 return 0; 2683 2684 dev_id_priv = 2685 __rdma_create_id(net, cma_listen_handler, id_priv, 2686 id_priv->id.ps, id_priv->id.qp_type, id_priv); 2687 if (IS_ERR(dev_id_priv)) 2688 return PTR_ERR(dev_id_priv); 2689 2690 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2691 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2692 rdma_addr_size(cma_src_addr(id_priv))); 2693 2694 _cma_attach_to_dev(dev_id_priv, cma_dev); 2695 rdma_restrack_add(&dev_id_priv->res); 2696 cma_id_get(id_priv); 2697 dev_id_priv->internal_id = 1; 2698 dev_id_priv->afonly = id_priv->afonly; 2699 mutex_lock(&id_priv->qp_mutex); 2700 dev_id_priv->tos_set = id_priv->tos_set; 2701 dev_id_priv->tos = id_priv->tos; 2702 mutex_unlock(&id_priv->qp_mutex); 2703 2704 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); 2705 if (ret) 2706 goto err_listen; 2707 list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); 2708 return 0; 2709 err_listen: 2710 /* Caller must destroy this after releasing lock */ 2711 *to_destroy = dev_id_priv; 2712 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); 2713 return ret; 2714 } 2715 2716 static int cma_listen_on_all(struct rdma_id_private *id_priv) 2717 { 2718 struct rdma_id_private *to_destroy; 2719 struct cma_device *cma_dev; 2720 int ret; 2721 2722 mutex_lock(&lock); 2723 list_add_tail(&id_priv->listen_any_item, &listen_any_list); 2724 list_for_each_entry(cma_dev, &dev_list, list) { 2725 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 2726 if (ret) { 2727 /* Prevent racing with cma_process_remove() */ 2728 if (to_destroy) 2729 list_del_init(&to_destroy->device_item); 2730 goto err_listen; 2731 } 2732 } 2733 mutex_unlock(&lock); 2734 return 0; 2735 2736 err_listen: 2737 _cma_cancel_listens(id_priv); 2738 mutex_unlock(&lock); 2739 if (to_destroy) 2740 rdma_destroy_id(&to_destroy->id); 2741 return ret; 2742 } 2743 2744 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2745 { 2746 struct rdma_id_private *id_priv; 2747 2748 id_priv = container_of(id, struct rdma_id_private, id); 2749 mutex_lock(&id_priv->qp_mutex); 2750 id_priv->tos = (u8) tos; 2751 id_priv->tos_set = true; 2752 mutex_unlock(&id_priv->qp_mutex); 2753 } 2754 EXPORT_SYMBOL(rdma_set_service_type); 2755 2756 /** 2757 * rdma_set_ack_timeout() - Set the ack timeout of QP associated 2758 * with a connection identifier. 2759 * @id: Communication identifier to associated with service type. 2760 * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec. 
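 * For example, timeout = 14 selects 4.096 us * 2^14, i.e. roughly 67 ms.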
2761 * 2762 * This function should be called before rdma_connect() on the active side, 2763 * and on the passive side before rdma_accept(). It is applicable to the primary 2764 * path only. The timeout will affect the local side of the QP; it is not 2765 * negotiated with the remote side and zero disables the timer. In case it is 2766 * set before rdma_resolve_route, the value will also be used to determine 2767 * PacketLifeTime for RoCE. 2768 * 2769 * Return: 0 for success 2770 */ 2771 int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) 2772 { 2773 struct rdma_id_private *id_priv; 2774 2775 if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) 2776 return -EINVAL; 2777 2778 id_priv = container_of(id, struct rdma_id_private, id); 2779 mutex_lock(&id_priv->qp_mutex); 2780 id_priv->timeout = timeout; 2781 id_priv->timeout_set = true; 2782 mutex_unlock(&id_priv->qp_mutex); 2783 2784 return 0; 2785 } 2786 EXPORT_SYMBOL(rdma_set_ack_timeout); 2787 2788 /** 2789 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the 2790 * QP associated with a connection identifier. 2791 * @id: Communication identifier associated with the QP. 2792 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK 2793 * Timer Field" in the IBTA specification. 2794 * 2795 * This function should be called before rdma_connect() on the active 2796 * side, and on the passive side before rdma_accept(). The timer value 2797 * will be associated with the local QP. When it receives a send it is 2798 * not ready to handle, typically because the receive queue is empty, an RNR 2799 * Retry NAK is returned to the requester with the min_rnr_timer 2800 * encoded. The requester will then wait at least the time specified 2801 * in the NAK before retrying. The default is zero, which translates 2802 * to a minimum RNR Timer value of 655 ms.
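 * Nonzero values select delays from 0.01 ms (value 1) up to 491.52 ms
 * (value 31), as encoded by enum ib_rnr_timeout in <rdma/ib_verbs.h>.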
2803 * 2804 * Return: 0 for success 2805 */ 2806 int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) 2807 { 2808 struct rdma_id_private *id_priv; 2809 2810 /* It is a five-bit value */ 2811 if (min_rnr_timer & 0xe0) 2812 return -EINVAL; 2813 2814 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) 2815 return -EINVAL; 2816 2817 id_priv = container_of(id, struct rdma_id_private, id); 2818 mutex_lock(&id_priv->qp_mutex); 2819 id_priv->min_rnr_timer = min_rnr_timer; 2820 id_priv->min_rnr_timer_set = true; 2821 mutex_unlock(&id_priv->qp_mutex); 2822 2823 return 0; 2824 } 2825 EXPORT_SYMBOL(rdma_set_min_rnr_timer); 2826 2827 static int route_set_path_rec_inbound(struct cma_work *work, 2828 struct sa_path_rec *path_rec) 2829 { 2830 struct rdma_route *route = &work->id->id.route; 2831 2832 if (!route->path_rec_inbound) { 2833 route->path_rec_inbound = 2834 kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL); 2835 if (!route->path_rec_inbound) 2836 return -ENOMEM; 2837 } 2838 2839 *route->path_rec_inbound = *path_rec; 2840 return 0; 2841 } 2842 2843 static int route_set_path_rec_outbound(struct cma_work *work, 2844 struct sa_path_rec *path_rec) 2845 { 2846 struct rdma_route *route = &work->id->id.route; 2847 2848 if (!route->path_rec_outbound) { 2849 route->path_rec_outbound = 2850 kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL); 2851 if (!route->path_rec_outbound) 2852 return -ENOMEM; 2853 } 2854 2855 *route->path_rec_outbound = *path_rec; 2856 return 0; 2857 } 2858 2859 static void cma_query_handler(int status, struct sa_path_rec *path_rec, 2860 unsigned int num_prs, void *context) 2861 { 2862 struct cma_work *work = context; 2863 struct rdma_route *route; 2864 int i; 2865 2866 route = &work->id->id.route; 2867 2868 if (status) 2869 goto fail; 2870 2871 for (i = 0; i < num_prs; i++) { 2872 if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) 2873 *route->path_rec = path_rec[i]; 2874 else if (path_rec[i].flags & IB_PATH_INBOUND) 2875 status = route_set_path_rec_inbound(work, &path_rec[i]); 2876 else if (path_rec[i].flags & IB_PATH_OUTBOUND) 2877 status = route_set_path_rec_outbound(work, 2878 &path_rec[i]); 2879 else 2880 status = -EINVAL; 2881 2882 if (status) 2883 goto fail; 2884 } 2885 2886 route->num_pri_alt_paths = 1; 2887 queue_work(cma_wq, &work->work); 2888 return; 2889 2890 fail: 2891 work->old_state = RDMA_CM_ROUTE_QUERY; 2892 work->new_state = RDMA_CM_ADDR_RESOLVED; 2893 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2894 work->event.status = status; 2895 pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. 
status %d\n", 2896 status); 2897 queue_work(cma_wq, &work->work); 2898 } 2899 2900 static int cma_query_ib_route(struct rdma_id_private *id_priv, 2901 unsigned long timeout_ms, struct cma_work *work) 2902 { 2903 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2904 struct sa_path_rec path_rec; 2905 ib_sa_comp_mask comp_mask; 2906 struct sockaddr_in6 *sin6; 2907 struct sockaddr_ib *sib; 2908 2909 memset(&path_rec, 0, sizeof path_rec); 2910 2911 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) 2912 path_rec.rec_type = SA_PATH_REC_TYPE_OPA; 2913 else 2914 path_rec.rec_type = SA_PATH_REC_TYPE_IB; 2915 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2916 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2917 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2918 path_rec.numb_path = 1; 2919 path_rec.reversible = 1; 2920 path_rec.service_id = rdma_get_service_id(&id_priv->id, 2921 cma_dst_addr(id_priv)); 2922 2923 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2924 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2925 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2926 2927 switch (cma_family(id_priv)) { 2928 case AF_INET: 2929 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2930 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2931 break; 2932 case AF_INET6: 2933 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2934 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2935 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2936 break; 2937 case AF_IB: 2938 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2939 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2940 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2941 break; 2942 } 2943 2944 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2945 id_priv->id.port_num, &path_rec, 2946 comp_mask, timeout_ms, 2947 GFP_KERNEL, cma_query_handler, 2948 work, &id_priv->query); 2949 2950 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 2951 } 2952 2953 static void cma_iboe_join_work_handler(struct work_struct *work) 2954 { 2955 struct cma_multicast *mc = 2956 container_of(work, struct cma_multicast, iboe_join.work); 2957 struct rdma_cm_event *event = &mc->iboe_join.event; 2958 struct rdma_id_private *id_priv = mc->id_priv; 2959 int ret; 2960 2961 mutex_lock(&id_priv->handler_mutex); 2962 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 2963 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 2964 goto out_unlock; 2965 2966 ret = cma_cm_event_handler(id_priv, event); 2967 WARN_ON(ret); 2968 2969 out_unlock: 2970 mutex_unlock(&id_priv->handler_mutex); 2971 if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) 2972 rdma_destroy_ah_attr(&event->param.ud.ah_attr); 2973 } 2974 2975 static void cma_work_handler(struct work_struct *_work) 2976 { 2977 struct cma_work *work = container_of(_work, struct cma_work, work); 2978 struct rdma_id_private *id_priv = work->id; 2979 2980 mutex_lock(&id_priv->handler_mutex); 2981 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 2982 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 2983 goto out_unlock; 2984 if (work->old_state != 0 || work->new_state != 0) { 2985 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2986 goto out_unlock; 2987 } 2988 2989 if (cma_cm_event_handler(id_priv, &work->event)) { 2990 cma_id_put(id_priv); 2991 destroy_id_handler_unlock(id_priv); 2992 goto out_free; 2993 } 2994 2995 out_unlock: 2996 mutex_unlock(&id_priv->handler_mutex); 2997 cma_id_put(id_priv); 2998 out_free: 2999 if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) 3000 rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); 3001 kfree(work); 3002 } 3003 3004 static void cma_init_resolve_route_work(struct cma_work *work, 3005 struct rdma_id_private *id_priv) 3006 { 3007 work->id = id_priv; 3008 INIT_WORK(&work->work, cma_work_handler); 3009 work->old_state = RDMA_CM_ROUTE_QUERY; 3010 work->new_state = RDMA_CM_ROUTE_RESOLVED; 3011 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 3012 } 3013 3014 static void enqueue_resolve_addr_work(struct cma_work *work, 3015 struct rdma_id_private *id_priv) 3016 { 3017 /* Balances with cma_id_put() in cma_work_handler */ 3018 cma_id_get(id_priv); 3019 3020 work->id = id_priv; 3021 INIT_WORK(&work->work, cma_work_handler); 3022 work->old_state = RDMA_CM_ADDR_QUERY; 3023 work->new_state = RDMA_CM_ADDR_RESOLVED; 3024 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3025 3026 queue_work(cma_wq, &work->work); 3027 } 3028 3029 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, 3030 unsigned long timeout_ms) 3031 { 3032 struct rdma_route *route = &id_priv->id.route; 3033 struct cma_work *work; 3034 int ret; 3035 3036 work = kzalloc(sizeof *work, GFP_KERNEL); 3037 if (!work) 3038 return -ENOMEM; 3039 3040 cma_init_resolve_route_work(work, id_priv); 3041 3042 if (!route->path_rec) 3043 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 3044 if (!route->path_rec) { 3045 ret = -ENOMEM; 3046 goto err1; 3047 } 3048 3049 ret = cma_query_ib_route(id_priv, timeout_ms, work); 3050 if (ret) 3051 goto err2; 3052 3053 return 0; 3054 err2: 3055 kfree(route->path_rec); 3056 route->path_rec = NULL; 3057 err1: 3058 kfree(work); 3059 return ret; 3060 } 3061 3062 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 3063 unsigned long supported_gids, 3064 enum ib_gid_type default_gid) 3065 { 3066 if ((network_type == RDMA_NETWORK_IPV4 || 3067 network_type == RDMA_NETWORK_IPV6) && 3068 
test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 3069 return IB_GID_TYPE_ROCE_UDP_ENCAP; 3070 3071 return default_gid; 3072 } 3073 3074 /* 3075 * cma_iboe_set_path_rec_l2_fields() is helper function which sets 3076 * path record type based on GID type. 3077 * It also sets up other L2 fields which includes destination mac address 3078 * netdev ifindex, of the path record. 3079 * It returns the netdev of the bound interface for this path record entry. 3080 */ 3081 static struct net_device * 3082 cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) 3083 { 3084 struct rdma_route *route = &id_priv->id.route; 3085 enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; 3086 struct rdma_addr *addr = &route->addr; 3087 unsigned long supported_gids; 3088 struct net_device *ndev; 3089 3090 if (!addr->dev_addr.bound_dev_if) 3091 return NULL; 3092 3093 ndev = dev_get_by_index(addr->dev_addr.net, 3094 addr->dev_addr.bound_dev_if); 3095 if (!ndev) 3096 return NULL; 3097 3098 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 3099 id_priv->id.port_num); 3100 gid_type = cma_route_gid_type(addr->dev_addr.network, 3101 supported_gids, 3102 id_priv->gid_type); 3103 /* Use the hint from IP Stack to select GID Type */ 3104 if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 3105 gid_type = ib_network_to_gid_type(addr->dev_addr.network); 3106 route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); 3107 3108 route->path_rec->roce.route_resolved = true; 3109 sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); 3110 return ndev; 3111 } 3112 3113 int rdma_set_ib_path(struct rdma_cm_id *id, 3114 struct sa_path_rec *path_rec) 3115 { 3116 struct rdma_id_private *id_priv; 3117 struct net_device *ndev; 3118 int ret; 3119 3120 id_priv = container_of(id, struct rdma_id_private, id); 3121 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 3122 RDMA_CM_ROUTE_RESOLVED)) 3123 return -EINVAL; 3124 3125 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), 3126 GFP_KERNEL); 3127 if (!id->route.path_rec) { 3128 ret = -ENOMEM; 3129 goto err; 3130 } 3131 3132 if (rdma_protocol_roce(id->device, id->port_num)) { 3133 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); 3134 if (!ndev) { 3135 ret = -ENODEV; 3136 goto err_free; 3137 } 3138 dev_put(ndev); 3139 } 3140 3141 id->route.num_pri_alt_paths = 1; 3142 return 0; 3143 3144 err_free: 3145 kfree(id->route.path_rec); 3146 id->route.path_rec = NULL; 3147 err: 3148 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 3149 return ret; 3150 } 3151 EXPORT_SYMBOL(rdma_set_ib_path); 3152 3153 static int cma_resolve_iw_route(struct rdma_id_private *id_priv) 3154 { 3155 struct cma_work *work; 3156 3157 work = kzalloc(sizeof *work, GFP_KERNEL); 3158 if (!work) 3159 return -ENOMEM; 3160 3161 cma_init_resolve_route_work(work, id_priv); 3162 queue_work(cma_wq, &work->work); 3163 return 0; 3164 } 3165 3166 static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) 3167 { 3168 struct net_device *dev; 3169 3170 dev = vlan_dev_real_dev(vlan_ndev); 3171 if (dev->num_tc) 3172 return netdev_get_prio_tc_map(dev, prio); 3173 3174 return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & 3175 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 3176 } 3177 3178 struct iboe_prio_tc_map { 3179 int input_prio; 3180 int output_tc; 3181 bool found; 3182 }; 3183 3184 static int get_lower_vlan_dev_tc(struct net_device *dev, 3185 struct netdev_nested_priv *priv) 3186 { 3187 struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; 3188 3189 
if (is_vlan_dev(dev)) 3190 map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); 3191 else if (dev->num_tc) 3192 map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); 3193 else 3194 map->output_tc = 0; 3195 /* We are interested only in first level VLAN device, so always 3196 * return 1 to stop iterating over next level devices. 3197 */ 3198 map->found = true; 3199 return 1; 3200 } 3201 3202 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 3203 { 3204 struct iboe_prio_tc_map prio_tc_map = {}; 3205 int prio = rt_tos2priority(tos); 3206 struct netdev_nested_priv priv; 3207 3208 /* If VLAN device, get it directly from the VLAN netdev */ 3209 if (is_vlan_dev(ndev)) 3210 return get_vlan_ndev_tc(ndev, prio); 3211 3212 prio_tc_map.input_prio = prio; 3213 priv.data = (void *)&prio_tc_map; 3214 rcu_read_lock(); 3215 netdev_walk_all_lower_dev_rcu(ndev, 3216 get_lower_vlan_dev_tc, 3217 &priv); 3218 rcu_read_unlock(); 3219 /* If map is found from lower device, use it; Otherwise 3220 * continue with the current netdevice to get priority to tc map. 3221 */ 3222 if (prio_tc_map.found) 3223 return prio_tc_map.output_tc; 3224 else if (ndev->num_tc) 3225 return netdev_get_prio_tc_map(ndev, prio); 3226 else 3227 return 0; 3228 } 3229 3230 static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) 3231 { 3232 struct sockaddr_in6 *addr6; 3233 u16 dport, sport; 3234 u32 hash, fl; 3235 3236 addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); 3237 fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; 3238 if ((cma_family(id_priv) != AF_INET6) || !fl) { 3239 dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); 3240 sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); 3241 hash = (u32)sport * 31 + dport; 3242 fl = hash & IB_GRH_FLOWLABEL_MASK; 3243 } 3244 3245 return cpu_to_be32(fl); 3246 } 3247 3248 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 3249 { 3250 struct rdma_route *route = &id_priv->id.route; 3251 struct rdma_addr *addr = &route->addr; 3252 struct cma_work *work; 3253 int ret; 3254 struct net_device *ndev; 3255 3256 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - 3257 rdma_start_port(id_priv->cma_dev->device)]; 3258 u8 tos; 3259 3260 mutex_lock(&id_priv->qp_mutex); 3261 tos = id_priv->tos_set ? 
id_priv->tos : default_roce_tos; 3262 mutex_unlock(&id_priv->qp_mutex); 3263 3264 work = kzalloc(sizeof *work, GFP_KERNEL); 3265 if (!work) 3266 return -ENOMEM; 3267 3268 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 3269 if (!route->path_rec) { 3270 ret = -ENOMEM; 3271 goto err1; 3272 } 3273 3274 route->num_pri_alt_paths = 1; 3275 3276 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); 3277 if (!ndev) { 3278 ret = -ENODEV; 3279 goto err2; 3280 } 3281 3282 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 3283 &route->path_rec->sgid); 3284 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 3285 &route->path_rec->dgid); 3286 3287 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 3288 /* TODO: get the hoplimit from the inet/inet6 device */ 3289 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 3290 else 3291 route->path_rec->hop_limit = 1; 3292 route->path_rec->reversible = 1; 3293 route->path_rec->pkey = cpu_to_be16(0xffff); 3294 route->path_rec->mtu_selector = IB_SA_EQ; 3295 route->path_rec->sl = iboe_tos_to_sl(ndev, tos); 3296 route->path_rec->traffic_class = tos; 3297 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 3298 route->path_rec->rate_selector = IB_SA_EQ; 3299 route->path_rec->rate = iboe_get_rate(ndev); 3300 dev_put(ndev); 3301 route->path_rec->packet_life_time_selector = IB_SA_EQ; 3302 /* In case ACK timeout is set, use this value to calculate 3303 * PacketLifeTime. As per IBTA 12.7.34, 3304 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). 3305 * Assuming a negligible local ACK delay, we can use 3306 * PacketLifeTime = local ACK timeout/2 3307 * as a reasonable approximation for RoCE networks. 3308 */ 3309 mutex_lock(&id_priv->qp_mutex); 3310 if (id_priv->timeout_set && id_priv->timeout) 3311 route->path_rec->packet_life_time = id_priv->timeout - 1; 3312 else 3313 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 3314 mutex_unlock(&id_priv->qp_mutex); 3315 3316 if (!route->path_rec->mtu) { 3317 ret = -EINVAL; 3318 goto err2; 3319 } 3320 3321 if (rdma_protocol_roce_udp_encap(id_priv->id.device, 3322 id_priv->id.port_num)) 3323 route->path_rec->flow_label = 3324 cma_get_roce_udp_flow_label(id_priv); 3325 3326 cma_init_resolve_route_work(work, id_priv); 3327 queue_work(cma_wq, &work->work); 3328 3329 return 0; 3330 3331 err2: 3332 kfree(route->path_rec); 3333 route->path_rec = NULL; 3334 route->num_pri_alt_paths = 0; 3335 err1: 3336 kfree(work); 3337 return ret; 3338 } 3339 3340 int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) 3341 { 3342 struct rdma_id_private *id_priv; 3343 int ret; 3344 3345 if (!timeout_ms) 3346 return -EINVAL; 3347 3348 id_priv = container_of(id, struct rdma_id_private, id); 3349 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 3350 return -EINVAL; 3351 3352 cma_id_get(id_priv); 3353 if (rdma_cap_ib_sa(id->device, id->port_num)) 3354 ret = cma_resolve_ib_route(id_priv, timeout_ms); 3355 else if (rdma_protocol_roce(id->device, id->port_num)) { 3356 ret = cma_resolve_iboe_route(id_priv); 3357 if (!ret) 3358 cma_add_id_to_tree(id_priv); 3359 } 3360 else if (rdma_protocol_iwarp(id->device, id->port_num)) 3361 ret = cma_resolve_iw_route(id_priv); 3362 else 3363 ret = -ENOSYS; 3364 3365 if (ret) 3366 goto err; 3367 3368 return 0; 3369 err: 3370 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 3371 cma_id_put(id_priv); 3372 return ret; 3373 } 3374 EXPORT_SYMBOL(rdma_resolve_route); 3375 3376 
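/*
 * Illustrative sketch only (not part of this file's logic): how a kernel ULP
 * might typically drive the client-side resolution sequence ending in the
 * rdma_resolve_route() exported above.  Address and route resolution are
 * asynchronous, so each step is issued from the rdma_cm event handler once
 * the previous one completes.  The names example_cm_handler/example_connect,
 * the 2000 ms timeouts, the ack timeout value and the RDMA_PS_TCP/IB_QPT_RC
 * choices are placeholders; QP creation and error handling are reduced to
 * comments.  Assumes the usual <rdma/rdma_cm.h> declarations.
 *
 *	static int example_cm_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			// bound to a device; now resolve the path
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ROUTE_RESOLVED: {
 *			struct rdma_conn_param param = { .retry_count = 7 };
 *
 *			// a QP would be created here via rdma_create_qp()
 *			return rdma_connect(id, &param);
 *		}
 *		case RDMA_CM_EVENT_ESTABLISHED:
 *			return 0;	// ready for ULP traffic
 *		default:
 *			// a non-zero return tells the CM core to destroy the id
 *			return -ECONNRESET;
 *		}
 *	}
 *
 *	static int example_connect(struct sockaddr *dst)
 *	{
 *		struct rdma_cm_id *id;
 *
 *		id = rdma_create_id(&init_net, example_cm_handler, NULL,
 *				    RDMA_PS_TCP, IB_QPT_RC);
 *		if (IS_ERR(id))
 *			return PTR_ERR(id);
 *		rdma_set_ack_timeout(id, 14);	// ~67 ms local ACK timeout
 *		return rdma_resolve_addr(id, NULL, dst, 2000);
 *	}
 */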
static void cma_set_loopback(struct sockaddr *addr) 3377 { 3378 switch (addr->sa_family) { 3379 case AF_INET: 3380 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 3381 break; 3382 case AF_INET6: 3383 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 3384 0, 0, 0, htonl(1)); 3385 break; 3386 default: 3387 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 3388 0, 0, 0, htonl(1)); 3389 break; 3390 } 3391 } 3392 3393 static int cma_bind_loopback(struct rdma_id_private *id_priv) 3394 { 3395 struct cma_device *cma_dev, *cur_dev; 3396 union ib_gid gid; 3397 enum ib_port_state port_state; 3398 unsigned int p; 3399 u16 pkey; 3400 int ret; 3401 3402 cma_dev = NULL; 3403 mutex_lock(&lock); 3404 list_for_each_entry(cur_dev, &dev_list, list) { 3405 if (cma_family(id_priv) == AF_IB && 3406 !rdma_cap_ib_cm(cur_dev->device, 1)) 3407 continue; 3408 3409 if (!cma_dev) 3410 cma_dev = cur_dev; 3411 3412 rdma_for_each_port (cur_dev->device, p) { 3413 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && 3414 port_state == IB_PORT_ACTIVE) { 3415 cma_dev = cur_dev; 3416 goto port_found; 3417 } 3418 } 3419 } 3420 3421 if (!cma_dev) { 3422 ret = -ENODEV; 3423 goto out; 3424 } 3425 3426 p = 1; 3427 3428 port_found: 3429 ret = rdma_query_gid(cma_dev->device, p, 0, &gid); 3430 if (ret) 3431 goto out; 3432 3433 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 3434 if (ret) 3435 goto out; 3436 3437 id_priv->id.route.addr.dev_addr.dev_type = 3438 (rdma_protocol_ib(cma_dev->device, p)) ? 3439 ARPHRD_INFINIBAND : ARPHRD_ETHER; 3440 3441 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3442 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 3443 id_priv->id.port_num = p; 3444 cma_attach_to_dev(id_priv, cma_dev); 3445 rdma_restrack_add(&id_priv->res); 3446 cma_set_loopback(cma_src_addr(id_priv)); 3447 out: 3448 mutex_unlock(&lock); 3449 return ret; 3450 } 3451 3452 static void addr_handler(int status, struct sockaddr *src_addr, 3453 struct rdma_dev_addr *dev_addr, void *context) 3454 { 3455 struct rdma_id_private *id_priv = context; 3456 struct rdma_cm_event event = {}; 3457 struct sockaddr *addr; 3458 struct sockaddr_storage old_addr; 3459 3460 mutex_lock(&id_priv->handler_mutex); 3461 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 3462 RDMA_CM_ADDR_RESOLVED)) 3463 goto out; 3464 3465 /* 3466 * Store the previous src address, so that if we fail to acquire 3467 * matching rdma device, old address can be restored back, which helps 3468 * to cancel the cma listen operation correctly. 3469 */ 3470 addr = cma_src_addr(id_priv); 3471 memcpy(&old_addr, addr, rdma_addr_size(addr)); 3472 memcpy(addr, src_addr, rdma_addr_size(src_addr)); 3473 if (!status && !id_priv->cma_dev) { 3474 status = cma_acquire_dev_by_src_ip(id_priv); 3475 if (status) 3476 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", 3477 status); 3478 rdma_restrack_add(&id_priv->res); 3479 } else if (status) { 3480 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. 
status %d\n", status); 3481 } 3482 3483 if (status) { 3484 memcpy(addr, &old_addr, 3485 rdma_addr_size((struct sockaddr *)&old_addr)); 3486 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 3487 RDMA_CM_ADDR_BOUND)) 3488 goto out; 3489 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3490 event.status = status; 3491 } else 3492 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3493 3494 if (cma_cm_event_handler(id_priv, &event)) { 3495 destroy_id_handler_unlock(id_priv); 3496 return; 3497 } 3498 out: 3499 mutex_unlock(&id_priv->handler_mutex); 3500 } 3501 3502 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 3503 { 3504 struct cma_work *work; 3505 union ib_gid gid; 3506 int ret; 3507 3508 work = kzalloc(sizeof *work, GFP_KERNEL); 3509 if (!work) 3510 return -ENOMEM; 3511 3512 if (!id_priv->cma_dev) { 3513 ret = cma_bind_loopback(id_priv); 3514 if (ret) 3515 goto err; 3516 } 3517 3518 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3519 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 3520 3521 enqueue_resolve_addr_work(work, id_priv); 3522 return 0; 3523 err: 3524 kfree(work); 3525 return ret; 3526 } 3527 3528 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 3529 { 3530 struct cma_work *work; 3531 int ret; 3532 3533 work = kzalloc(sizeof *work, GFP_KERNEL); 3534 if (!work) 3535 return -ENOMEM; 3536 3537 if (!id_priv->cma_dev) { 3538 ret = cma_resolve_ib_dev(id_priv); 3539 if (ret) 3540 goto err; 3541 } 3542 3543 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 3544 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 3545 3546 enqueue_resolve_addr_work(work, id_priv); 3547 return 0; 3548 err: 3549 kfree(work); 3550 return ret; 3551 } 3552 3553 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 3554 { 3555 struct rdma_id_private *id_priv; 3556 unsigned long flags; 3557 int ret; 3558 3559 id_priv = container_of(id, struct rdma_id_private, id); 3560 spin_lock_irqsave(&id_priv->lock, flags); 3561 if ((reuse && id_priv->state != RDMA_CM_LISTEN) || 3562 id_priv->state == RDMA_CM_IDLE) { 3563 id_priv->reuseaddr = reuse; 3564 ret = 0; 3565 } else { 3566 ret = -EINVAL; 3567 } 3568 spin_unlock_irqrestore(&id_priv->lock, flags); 3569 return ret; 3570 } 3571 EXPORT_SYMBOL(rdma_set_reuseaddr); 3572 3573 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 3574 { 3575 struct rdma_id_private *id_priv; 3576 unsigned long flags; 3577 int ret; 3578 3579 id_priv = container_of(id, struct rdma_id_private, id); 3580 spin_lock_irqsave(&id_priv->lock, flags); 3581 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 3582 id_priv->options |= (1 << CMA_OPTION_AFONLY); 3583 id_priv->afonly = afonly; 3584 ret = 0; 3585 } else { 3586 ret = -EINVAL; 3587 } 3588 spin_unlock_irqrestore(&id_priv->lock, flags); 3589 return ret; 3590 } 3591 EXPORT_SYMBOL(rdma_set_afonly); 3592 3593 static void cma_bind_port(struct rdma_bind_list *bind_list, 3594 struct rdma_id_private *id_priv) 3595 { 3596 struct sockaddr *addr; 3597 struct sockaddr_ib *sib; 3598 u64 sid, mask; 3599 __be16 port; 3600 3601 lockdep_assert_held(&lock); 3602 3603 addr = cma_src_addr(id_priv); 3604 port = htons(bind_list->port); 3605 3606 switch (addr->sa_family) { 3607 case AF_INET: 3608 ((struct sockaddr_in *) addr)->sin_port = port; 3609 break; 3610 case AF_INET6: 3611 ((struct sockaddr_in6 *) addr)->sin6_port = port; 3612 break; 3613 case AF_IB: 3614 sib = (struct sockaddr_ib *) addr; 3615 sid = be64_to_cpu(sib->sib_sid); 3616 mask = 
be64_to_cpu(sib->sib_sid_mask); 3617 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 3618 sib->sib_sid_mask = cpu_to_be64(~0ULL); 3619 break; 3620 } 3621 id_priv->bind_list = bind_list; 3622 hlist_add_head(&id_priv->node, &bind_list->owners); 3623 } 3624 3625 static int cma_alloc_port(enum rdma_ucm_port_space ps, 3626 struct rdma_id_private *id_priv, unsigned short snum) 3627 { 3628 struct rdma_bind_list *bind_list; 3629 int ret; 3630 3631 lockdep_assert_held(&lock); 3632 3633 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 3634 if (!bind_list) 3635 return -ENOMEM; 3636 3637 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 3638 snum); 3639 if (ret < 0) 3640 goto err; 3641 3642 bind_list->ps = ps; 3643 bind_list->port = snum; 3644 cma_bind_port(bind_list, id_priv); 3645 return 0; 3646 err: 3647 kfree(bind_list); 3648 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 3649 } 3650 3651 static int cma_port_is_unique(struct rdma_bind_list *bind_list, 3652 struct rdma_id_private *id_priv) 3653 { 3654 struct rdma_id_private *cur_id; 3655 struct sockaddr *daddr = cma_dst_addr(id_priv); 3656 struct sockaddr *saddr = cma_src_addr(id_priv); 3657 __be16 dport = cma_port(daddr); 3658 3659 lockdep_assert_held(&lock); 3660 3661 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3662 struct sockaddr *cur_daddr = cma_dst_addr(cur_id); 3663 struct sockaddr *cur_saddr = cma_src_addr(cur_id); 3664 __be16 cur_dport = cma_port(cur_daddr); 3665 3666 if (id_priv == cur_id) 3667 continue; 3668 3669 /* different dest port -> unique */ 3670 if (!cma_any_port(daddr) && 3671 !cma_any_port(cur_daddr) && 3672 (dport != cur_dport)) 3673 continue; 3674 3675 /* different src address -> unique */ 3676 if (!cma_any_addr(saddr) && 3677 !cma_any_addr(cur_saddr) && 3678 cma_addr_cmp(saddr, cur_saddr)) 3679 continue; 3680 3681 /* different dst address -> unique */ 3682 if (!cma_any_addr(daddr) && 3683 !cma_any_addr(cur_daddr) && 3684 cma_addr_cmp(daddr, cur_daddr)) 3685 continue; 3686 3687 return -EADDRNOTAVAIL; 3688 } 3689 return 0; 3690 } 3691 3692 static int cma_alloc_any_port(enum rdma_ucm_port_space ps, 3693 struct rdma_id_private *id_priv) 3694 { 3695 static unsigned int last_used_port; 3696 int low, high, remaining; 3697 unsigned int rover; 3698 struct net *net = id_priv->id.route.addr.dev_addr.net; 3699 3700 lockdep_assert_held(&lock); 3701 3702 inet_get_local_port_range(net, &low, &high); 3703 remaining = (high - low) + 1; 3704 rover = get_random_u32_inclusive(low, remaining + low - 1); 3705 retry: 3706 if (last_used_port != rover) { 3707 struct rdma_bind_list *bind_list; 3708 int ret; 3709 3710 bind_list = cma_ps_find(net, ps, (unsigned short)rover); 3711 3712 if (!bind_list) { 3713 ret = cma_alloc_port(ps, id_priv, rover); 3714 } else { 3715 ret = cma_port_is_unique(bind_list, id_priv); 3716 if (!ret) 3717 cma_bind_port(bind_list, id_priv); 3718 } 3719 /* 3720 * Remember previously used port number in order to avoid 3721 * re-using same port immediately after it is closed. 3722 */ 3723 if (!ret) 3724 last_used_port = rover; 3725 if (ret != -EADDRNOTAVAIL) 3726 return ret; 3727 } 3728 if (--remaining) { 3729 rover++; 3730 if ((rover < low) || (rover > high)) 3731 rover = low; 3732 goto retry; 3733 } 3734 return -EADDRNOTAVAIL; 3735 } 3736 3737 /* 3738 * Check that the requested port is available. This is called when trying to 3739 * bind to a specific port, or when trying to listen on a bound port. 
In 3740 * the latter case, the provided id_priv may already be on the bind_list, but 3741 * we still need to check that it's okay to start listening. 3742 */ 3743 static int cma_check_port(struct rdma_bind_list *bind_list, 3744 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3745 { 3746 struct rdma_id_private *cur_id; 3747 struct sockaddr *addr, *cur_addr; 3748 3749 lockdep_assert_held(&lock); 3750 3751 addr = cma_src_addr(id_priv); 3752 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3753 if (id_priv == cur_id) 3754 continue; 3755 3756 if (reuseaddr && cur_id->reuseaddr) 3757 continue; 3758 3759 cur_addr = cma_src_addr(cur_id); 3760 if (id_priv->afonly && cur_id->afonly && 3761 (addr->sa_family != cur_addr->sa_family)) 3762 continue; 3763 3764 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3765 return -EADDRNOTAVAIL; 3766 3767 if (!cma_addr_cmp(addr, cur_addr)) 3768 return -EADDRINUSE; 3769 } 3770 return 0; 3771 } 3772 3773 static int cma_use_port(enum rdma_ucm_port_space ps, 3774 struct rdma_id_private *id_priv) 3775 { 3776 struct rdma_bind_list *bind_list; 3777 unsigned short snum; 3778 int ret; 3779 3780 lockdep_assert_held(&lock); 3781 3782 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3783 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3784 return -EACCES; 3785 3786 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3787 if (!bind_list) { 3788 ret = cma_alloc_port(ps, id_priv, snum); 3789 } else { 3790 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3791 if (!ret) 3792 cma_bind_port(bind_list, id_priv); 3793 } 3794 return ret; 3795 } 3796 3797 static enum rdma_ucm_port_space 3798 cma_select_inet_ps(struct rdma_id_private *id_priv) 3799 { 3800 switch (id_priv->id.ps) { 3801 case RDMA_PS_TCP: 3802 case RDMA_PS_UDP: 3803 case RDMA_PS_IPOIB: 3804 case RDMA_PS_IB: 3805 return id_priv->id.ps; 3806 default: 3807 3808 return 0; 3809 } 3810 } 3811 3812 static enum rdma_ucm_port_space 3813 cma_select_ib_ps(struct rdma_id_private *id_priv) 3814 { 3815 enum rdma_ucm_port_space ps = 0; 3816 struct sockaddr_ib *sib; 3817 u64 sid_ps, mask, sid; 3818 3819 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3820 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3821 sid = be64_to_cpu(sib->sib_sid) & mask; 3822 3823 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3824 sid_ps = RDMA_IB_IP_PS_IB; 3825 ps = RDMA_PS_IB; 3826 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3827 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3828 sid_ps = RDMA_IB_IP_PS_TCP; 3829 ps = RDMA_PS_TCP; 3830 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3831 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3832 sid_ps = RDMA_IB_IP_PS_UDP; 3833 ps = RDMA_PS_UDP; 3834 } 3835 3836 if (ps) { 3837 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3838 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3839 be64_to_cpu(sib->sib_sid_mask)); 3840 } 3841 return ps; 3842 } 3843 3844 static int cma_get_port(struct rdma_id_private *id_priv) 3845 { 3846 enum rdma_ucm_port_space ps; 3847 int ret; 3848 3849 if (cma_family(id_priv) != AF_IB) 3850 ps = cma_select_inet_ps(id_priv); 3851 else 3852 ps = cma_select_ib_ps(id_priv); 3853 if (!ps) 3854 return -EPROTONOSUPPORT; 3855 3856 mutex_lock(&lock); 3857 if (cma_any_port(cma_src_addr(id_priv))) 3858 ret = cma_alloc_any_port(ps, id_priv); 3859 else 3860 ret = cma_use_port(ps, id_priv); 3861 mutex_unlock(&lock); 3862 3863 
return ret; 3864 } 3865 3866 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3867 struct sockaddr *addr) 3868 { 3869 #if IS_ENABLED(CONFIG_IPV6) 3870 struct sockaddr_in6 *sin6; 3871 3872 if (addr->sa_family != AF_INET6) 3873 return 0; 3874 3875 sin6 = (struct sockaddr_in6 *) addr; 3876 3877 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3878 return 0; 3879 3880 if (!sin6->sin6_scope_id) 3881 return -EINVAL; 3882 3883 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3884 #endif 3885 return 0; 3886 } 3887 3888 int rdma_listen(struct rdma_cm_id *id, int backlog) 3889 { 3890 struct rdma_id_private *id_priv = 3891 container_of(id, struct rdma_id_private, id); 3892 int ret; 3893 3894 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { 3895 struct sockaddr_in any_in = { 3896 .sin_family = AF_INET, 3897 .sin_addr.s_addr = htonl(INADDR_ANY), 3898 }; 3899 3900 /* For a well behaved ULP state will be RDMA_CM_IDLE */ 3901 ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); 3902 if (ret) 3903 return ret; 3904 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, 3905 RDMA_CM_LISTEN))) 3906 return -EINVAL; 3907 } 3908 3909 /* 3910 * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable 3911 * any more, and has to be unique in the bind list. 3912 */ 3913 if (id_priv->reuseaddr) { 3914 mutex_lock(&lock); 3915 ret = cma_check_port(id_priv->bind_list, id_priv, 0); 3916 if (!ret) 3917 id_priv->reuseaddr = 0; 3918 mutex_unlock(&lock); 3919 if (ret) 3920 goto err; 3921 } 3922 3923 id_priv->backlog = backlog; 3924 if (id_priv->cma_dev) { 3925 if (rdma_cap_ib_cm(id->device, 1)) { 3926 ret = cma_ib_listen(id_priv); 3927 if (ret) 3928 goto err; 3929 } else if (rdma_cap_iw_cm(id->device, 1)) { 3930 ret = cma_iw_listen(id_priv, backlog); 3931 if (ret) 3932 goto err; 3933 } else { 3934 ret = -ENOSYS; 3935 goto err; 3936 } 3937 } else { 3938 ret = cma_listen_on_all(id_priv); 3939 if (ret) 3940 goto err; 3941 } 3942 3943 return 0; 3944 err: 3945 id_priv->backlog = 0; 3946 /* 3947 * All the failure paths that lead here will not allow the req_handler's 3948 * to have run. 
3949 */ 3950 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3951 return ret; 3952 } 3953 EXPORT_SYMBOL(rdma_listen); 3954 3955 static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, 3956 struct sockaddr *addr, const struct sockaddr *daddr) 3957 { 3958 struct sockaddr *id_daddr; 3959 int ret; 3960 3961 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3962 addr->sa_family != AF_IB) 3963 return -EAFNOSUPPORT; 3964 3965 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3966 return -EINVAL; 3967 3968 ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); 3969 if (ret) 3970 goto err1; 3971 3972 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3973 if (!cma_any_addr(addr)) { 3974 ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); 3975 if (ret) 3976 goto err1; 3977 3978 ret = cma_acquire_dev_by_src_ip(id_priv); 3979 if (ret) 3980 goto err1; 3981 } 3982 3983 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3984 if (addr->sa_family == AF_INET) 3985 id_priv->afonly = 1; 3986 #if IS_ENABLED(CONFIG_IPV6) 3987 else if (addr->sa_family == AF_INET6) { 3988 struct net *net = id_priv->id.route.addr.dev_addr.net; 3989 3990 id_priv->afonly = net->ipv6.sysctl.bindv6only; 3991 } 3992 #endif 3993 } 3994 id_daddr = cma_dst_addr(id_priv); 3995 if (daddr != id_daddr) 3996 memcpy(id_daddr, daddr, rdma_addr_size(addr)); 3997 id_daddr->sa_family = addr->sa_family; 3998 3999 ret = cma_get_port(id_priv); 4000 if (ret) 4001 goto err2; 4002 4003 if (!cma_any_addr(addr)) 4004 rdma_restrack_add(&id_priv->res); 4005 return 0; 4006 err2: 4007 if (id_priv->cma_dev) 4008 cma_release_dev(id_priv); 4009 err1: 4010 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 4011 return ret; 4012 } 4013 4014 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 4015 const struct sockaddr *dst_addr) 4016 { 4017 struct rdma_id_private *id_priv = 4018 container_of(id, struct rdma_id_private, id); 4019 struct sockaddr_storage zero_sock = {}; 4020 4021 if (src_addr && src_addr->sa_family) 4022 return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); 4023 4024 /* 4025 * When the src_addr is not specified, automatically supply an any addr 4026 */ 4027 zero_sock.ss_family = dst_addr->sa_family; 4028 if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { 4029 struct sockaddr_in6 *src_addr6 = 4030 (struct sockaddr_in6 *)&zero_sock; 4031 struct sockaddr_in6 *dst_addr6 = 4032 (struct sockaddr_in6 *)dst_addr; 4033 4034 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 4035 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 4036 id->route.addr.dev_addr.bound_dev_if = 4037 dst_addr6->sin6_scope_id; 4038 } else if (dst_addr->sa_family == AF_IB) { 4039 ((struct sockaddr_ib *)&zero_sock)->sib_pkey = 4040 ((struct sockaddr_ib *)dst_addr)->sib_pkey; 4041 } 4042 return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); 4043 } 4044 4045 /* 4046 * If required, resolve the source address for bind and leave the id_priv in 4047 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior 4048 * calls made by ULP, a previously bound ID will not be re-bound and src_addr is 4049 * ignored. 
4050  */
4051 static int resolve_prepare_src(struct rdma_id_private *id_priv,
4052 			       struct sockaddr *src_addr,
4053 			       const struct sockaddr *dst_addr)
4054 {
4055 	int ret;
4056 
4057 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
4058 		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
4059 		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
4060 		if (ret)
4061 			return ret;
4062 		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
4063 					   RDMA_CM_ADDR_QUERY)))
4064 			return -EINVAL;
4065 
4066 	}
4067 
4068 	if (cma_family(id_priv) != dst_addr->sa_family) {
4069 		ret = -EINVAL;
4070 		goto err_state;
4071 	}
4072 	return 0;
4073 
4074 err_state:
4075 	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
4076 	return ret;
4077 }
4078 
4079 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4080 		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
4081 {
4082 	struct rdma_id_private *id_priv =
4083 		container_of(id, struct rdma_id_private, id);
4084 	int ret;
4085 
4086 	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
4087 	if (ret)
4088 		return ret;
4089 
4090 	if (cma_any_addr(dst_addr)) {
4091 		ret = cma_resolve_loopback(id_priv);
4092 	} else {
4093 		if (dst_addr->sa_family == AF_IB) {
4094 			ret = cma_resolve_ib_addr(id_priv);
4095 		} else {
4096 			/*
4097 			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
4098 			 * rdma_resolve_ip() is called, e.g. through the error
4099 			 * path in addr_handler(). If this happens the existing
4100 			 * request must be canceled before issuing a new one.
4101 			 * Since canceling a request is a bit slow and this
4102 			 * oddball path is rare, simply record that a request
4103 			 * has been issued. The flag is never cleared, because
4104 			 * this is the only place that needs to cancel and it
4105 			 * does so immediately before calling rdma_resolve_ip().
4106 */ 4107 if (id_priv->used_resolve_ip) 4108 rdma_addr_cancel(&id->route.addr.dev_addr); 4109 else 4110 id_priv->used_resolve_ip = 1; 4111 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, 4112 &id->route.addr.dev_addr, 4113 timeout_ms, addr_handler, 4114 false, id_priv); 4115 } 4116 } 4117 if (ret) 4118 goto err; 4119 4120 return 0; 4121 err: 4122 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 4123 return ret; 4124 } 4125 EXPORT_SYMBOL(rdma_resolve_addr); 4126 4127 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 4128 { 4129 struct rdma_id_private *id_priv = 4130 container_of(id, struct rdma_id_private, id); 4131 4132 return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); 4133 } 4134 EXPORT_SYMBOL(rdma_bind_addr); 4135 4136 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 4137 { 4138 struct cma_hdr *cma_hdr; 4139 4140 cma_hdr = hdr; 4141 cma_hdr->cma_version = CMA_VERSION; 4142 if (cma_family(id_priv) == AF_INET) { 4143 struct sockaddr_in *src4, *dst4; 4144 4145 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 4146 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 4147 4148 cma_set_ip_ver(cma_hdr, 4); 4149 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 4150 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 4151 cma_hdr->port = src4->sin_port; 4152 } else if (cma_family(id_priv) == AF_INET6) { 4153 struct sockaddr_in6 *src6, *dst6; 4154 4155 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 4156 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 4157 4158 cma_set_ip_ver(cma_hdr, 6); 4159 cma_hdr->src_addr.ip6 = src6->sin6_addr; 4160 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 4161 cma_hdr->port = src6->sin6_port; 4162 } 4163 return 0; 4164 } 4165 4166 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 4167 const struct ib_cm_event *ib_event) 4168 { 4169 struct rdma_id_private *id_priv = cm_id->context; 4170 struct rdma_cm_event event = {}; 4171 const struct ib_cm_sidr_rep_event_param *rep = 4172 &ib_event->param.sidr_rep_rcvd; 4173 int ret; 4174 4175 mutex_lock(&id_priv->handler_mutex); 4176 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 4177 goto out; 4178 4179 switch (ib_event->event) { 4180 case IB_CM_SIDR_REQ_ERROR: 4181 event.event = RDMA_CM_EVENT_UNREACHABLE; 4182 event.status = -ETIMEDOUT; 4183 break; 4184 case IB_CM_SIDR_REP_RECEIVED: 4185 event.param.ud.private_data = ib_event->private_data; 4186 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 4187 if (rep->status != IB_SIDR_SUCCESS) { 4188 event.event = RDMA_CM_EVENT_UNREACHABLE; 4189 event.status = ib_event->param.sidr_rep_rcvd.status; 4190 pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", 4191 event.status); 4192 break; 4193 } 4194 ret = cma_set_qkey(id_priv, rep->qkey); 4195 if (ret) { 4196 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. 
status %d\n", ret); 4197 event.event = RDMA_CM_EVENT_ADDR_ERROR; 4198 event.status = ret; 4199 break; 4200 } 4201 ib_init_ah_attr_from_path(id_priv->id.device, 4202 id_priv->id.port_num, 4203 id_priv->id.route.path_rec, 4204 &event.param.ud.ah_attr, 4205 rep->sgid_attr); 4206 event.param.ud.qp_num = rep->qpn; 4207 event.param.ud.qkey = rep->qkey; 4208 event.event = RDMA_CM_EVENT_ESTABLISHED; 4209 event.status = 0; 4210 break; 4211 default: 4212 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 4213 ib_event->event); 4214 goto out; 4215 } 4216 4217 ret = cma_cm_event_handler(id_priv, &event); 4218 4219 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4220 if (ret) { 4221 /* Destroy the CM ID by returning a non-zero value. */ 4222 id_priv->cm_id.ib = NULL; 4223 destroy_id_handler_unlock(id_priv); 4224 return ret; 4225 } 4226 out: 4227 mutex_unlock(&id_priv->handler_mutex); 4228 return 0; 4229 } 4230 4231 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 4232 struct rdma_conn_param *conn_param) 4233 { 4234 struct ib_cm_sidr_req_param req; 4235 struct ib_cm_id *id; 4236 void *private_data; 4237 u8 offset; 4238 int ret; 4239 4240 memset(&req, 0, sizeof req); 4241 offset = cma_user_data_offset(id_priv); 4242 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4243 return -EINVAL; 4244 4245 if (req.private_data_len) { 4246 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4247 if (!private_data) 4248 return -ENOMEM; 4249 } else { 4250 private_data = NULL; 4251 } 4252 4253 if (conn_param->private_data && conn_param->private_data_len) 4254 memcpy(private_data + offset, conn_param->private_data, 4255 conn_param->private_data_len); 4256 4257 if (private_data) { 4258 ret = cma_format_hdr(private_data, id_priv); 4259 if (ret) 4260 goto out; 4261 req.private_data = private_data; 4262 } 4263 4264 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 4265 id_priv); 4266 if (IS_ERR(id)) { 4267 ret = PTR_ERR(id); 4268 goto out; 4269 } 4270 id_priv->cm_id.ib = id; 4271 4272 req.path = id_priv->id.route.path_rec; 4273 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4274 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4275 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 4276 req.max_cm_retries = CMA_MAX_CM_RETRIES; 4277 4278 trace_cm_send_sidr_req(id_priv); 4279 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 4280 if (ret) { 4281 ib_destroy_cm_id(id_priv->cm_id.ib); 4282 id_priv->cm_id.ib = NULL; 4283 } 4284 out: 4285 kfree(private_data); 4286 return ret; 4287 } 4288 4289 static int cma_connect_ib(struct rdma_id_private *id_priv, 4290 struct rdma_conn_param *conn_param) 4291 { 4292 struct ib_cm_req_param req; 4293 struct rdma_route *route; 4294 void *private_data; 4295 struct ib_cm_id *id; 4296 u8 offset; 4297 int ret; 4298 4299 memset(&req, 0, sizeof req); 4300 offset = cma_user_data_offset(id_priv); 4301 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4302 return -EINVAL; 4303 4304 if (req.private_data_len) { 4305 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4306 if (!private_data) 4307 return -ENOMEM; 4308 } else { 4309 private_data = NULL; 4310 } 4311 4312 if (conn_param->private_data && conn_param->private_data_len) 4313 memcpy(private_data + offset, conn_param->private_data, 4314 conn_param->private_data_len); 4315 4316 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 4317 if (IS_ERR(id)) { 4318 ret = PTR_ERR(id); 4319 goto out; 
4320 } 4321 id_priv->cm_id.ib = id; 4322 4323 route = &id_priv->id.route; 4324 if (private_data) { 4325 ret = cma_format_hdr(private_data, id_priv); 4326 if (ret) 4327 goto out; 4328 req.private_data = private_data; 4329 } 4330 4331 req.primary_path = &route->path_rec[0]; 4332 req.primary_path_inbound = route->path_rec_inbound; 4333 req.primary_path_outbound = route->path_rec_outbound; 4334 if (route->num_pri_alt_paths == 2) 4335 req.alternate_path = &route->path_rec[1]; 4336 4337 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4338 /* Alternate path SGID attribute currently unsupported */ 4339 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4340 req.qp_num = id_priv->qp_num; 4341 req.qp_type = id_priv->id.qp_type; 4342 req.starting_psn = id_priv->seq_num; 4343 req.responder_resources = conn_param->responder_resources; 4344 req.initiator_depth = conn_param->initiator_depth; 4345 req.flow_control = conn_param->flow_control; 4346 req.retry_count = min_t(u8, 7, conn_param->retry_count); 4347 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4348 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4349 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4350 req.max_cm_retries = CMA_MAX_CM_RETRIES; 4351 req.srq = id_priv->srq ? 1 : 0; 4352 req.ece.vendor_id = id_priv->ece.vendor_id; 4353 req.ece.attr_mod = id_priv->ece.attr_mod; 4354 4355 trace_cm_send_req(id_priv); 4356 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 4357 out: 4358 if (ret && !IS_ERR(id)) { 4359 ib_destroy_cm_id(id); 4360 id_priv->cm_id.ib = NULL; 4361 } 4362 4363 kfree(private_data); 4364 return ret; 4365 } 4366 4367 static int cma_connect_iw(struct rdma_id_private *id_priv, 4368 struct rdma_conn_param *conn_param) 4369 { 4370 struct iw_cm_id *cm_id; 4371 int ret; 4372 struct iw_cm_conn_param iw_param; 4373 4374 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 4375 if (IS_ERR(cm_id)) 4376 return PTR_ERR(cm_id); 4377 4378 mutex_lock(&id_priv->qp_mutex); 4379 cm_id->tos = id_priv->tos; 4380 cm_id->tos_set = id_priv->tos_set; 4381 mutex_unlock(&id_priv->qp_mutex); 4382 4383 id_priv->cm_id.iw = cm_id; 4384 4385 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 4386 rdma_addr_size(cma_src_addr(id_priv))); 4387 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 4388 rdma_addr_size(cma_dst_addr(id_priv))); 4389 4390 ret = cma_modify_qp_rtr(id_priv, conn_param); 4391 if (ret) 4392 goto out; 4393 4394 if (conn_param) { 4395 iw_param.ord = conn_param->initiator_depth; 4396 iw_param.ird = conn_param->responder_resources; 4397 iw_param.private_data = conn_param->private_data; 4398 iw_param.private_data_len = conn_param->private_data_len; 4399 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 4400 } else { 4401 memset(&iw_param, 0, sizeof iw_param); 4402 iw_param.qpn = id_priv->qp_num; 4403 } 4404 ret = iw_cm_connect(cm_id, &iw_param); 4405 out: 4406 if (ret) { 4407 iw_destroy_cm_id(cm_id); 4408 id_priv->cm_id.iw = NULL; 4409 } 4410 return ret; 4411 } 4412 4413 /** 4414 * rdma_connect_locked - Initiate an active connection request. 4415 * @id: Connection identifier to connect. 4416 * @conn_param: Connection information used for connected QPs. 4417 * 4418 * Same as rdma_connect() but can only be called from the 4419 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. 
4420 */ 4421 int rdma_connect_locked(struct rdma_cm_id *id, 4422 struct rdma_conn_param *conn_param) 4423 { 4424 struct rdma_id_private *id_priv = 4425 container_of(id, struct rdma_id_private, id); 4426 int ret; 4427 4428 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 4429 return -EINVAL; 4430 4431 if (!id->qp) { 4432 id_priv->qp_num = conn_param->qp_num; 4433 id_priv->srq = conn_param->srq; 4434 } 4435 4436 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4437 if (id->qp_type == IB_QPT_UD) 4438 ret = cma_resolve_ib_udp(id_priv, conn_param); 4439 else 4440 ret = cma_connect_ib(id_priv, conn_param); 4441 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4442 ret = cma_connect_iw(id_priv, conn_param); 4443 } else { 4444 ret = -ENOSYS; 4445 } 4446 if (ret) 4447 goto err_state; 4448 return 0; 4449 err_state: 4450 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 4451 return ret; 4452 } 4453 EXPORT_SYMBOL(rdma_connect_locked); 4454 4455 /** 4456 * rdma_connect - Initiate an active connection request. 4457 * @id: Connection identifier to connect. 4458 * @conn_param: Connection information used for connected QPs. 4459 * 4460 * Users must have resolved a route for the rdma_cm_id to connect with by having 4461 * called rdma_resolve_route before calling this routine. 4462 * 4463 * This call will either connect to a remote QP or obtain remote QP information 4464 * for unconnected rdma_cm_id's. The actual operation is based on the 4465 * rdma_cm_id's port space. 4466 */ 4467 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4468 { 4469 struct rdma_id_private *id_priv = 4470 container_of(id, struct rdma_id_private, id); 4471 int ret; 4472 4473 mutex_lock(&id_priv->handler_mutex); 4474 ret = rdma_connect_locked(id, conn_param); 4475 mutex_unlock(&id_priv->handler_mutex); 4476 return ret; 4477 } 4478 EXPORT_SYMBOL(rdma_connect); 4479 4480 /** 4481 * rdma_connect_ece - Initiate an active connection request with ECE data. 4482 * @id: Connection identifier to connect. 4483 * @conn_param: Connection information used for connected QPs. 4484 * @ece: ECE parameters 4485 * 4486 * See rdma_connect() explanation. 4487 */ 4488 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4489 struct rdma_ucm_ece *ece) 4490 { 4491 struct rdma_id_private *id_priv = 4492 container_of(id, struct rdma_id_private, id); 4493 4494 id_priv->ece.vendor_id = ece->vendor_id; 4495 id_priv->ece.attr_mod = ece->attr_mod; 4496 4497 return rdma_connect(id, conn_param); 4498 } 4499 EXPORT_SYMBOL(rdma_connect_ece); 4500 4501 static int cma_accept_ib(struct rdma_id_private *id_priv, 4502 struct rdma_conn_param *conn_param) 4503 { 4504 struct ib_cm_rep_param rep; 4505 int ret; 4506 4507 ret = cma_modify_qp_rtr(id_priv, conn_param); 4508 if (ret) 4509 goto out; 4510 4511 ret = cma_modify_qp_rts(id_priv, conn_param); 4512 if (ret) 4513 goto out; 4514 4515 memset(&rep, 0, sizeof rep); 4516 rep.qp_num = id_priv->qp_num; 4517 rep.starting_psn = id_priv->seq_num; 4518 rep.private_data = conn_param->private_data; 4519 rep.private_data_len = conn_param->private_data_len; 4520 rep.responder_resources = conn_param->responder_resources; 4521 rep.initiator_depth = conn_param->initiator_depth; 4522 rep.failover_accepted = 0; 4523 rep.flow_control = conn_param->flow_control; 4524 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4525 rep.srq = id_priv->srq ? 
1 : 0; 4526 rep.ece.vendor_id = id_priv->ece.vendor_id; 4527 rep.ece.attr_mod = id_priv->ece.attr_mod; 4528 4529 trace_cm_send_rep(id_priv); 4530 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 4531 out: 4532 return ret; 4533 } 4534 4535 static int cma_accept_iw(struct rdma_id_private *id_priv, 4536 struct rdma_conn_param *conn_param) 4537 { 4538 struct iw_cm_conn_param iw_param; 4539 int ret; 4540 4541 if (!conn_param) 4542 return -EINVAL; 4543 4544 ret = cma_modify_qp_rtr(id_priv, conn_param); 4545 if (ret) 4546 return ret; 4547 4548 iw_param.ord = conn_param->initiator_depth; 4549 iw_param.ird = conn_param->responder_resources; 4550 iw_param.private_data = conn_param->private_data; 4551 iw_param.private_data_len = conn_param->private_data_len; 4552 if (id_priv->id.qp) 4553 iw_param.qpn = id_priv->qp_num; 4554 else 4555 iw_param.qpn = conn_param->qp_num; 4556 4557 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 4558 } 4559 4560 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 4561 enum ib_cm_sidr_status status, u32 qkey, 4562 const void *private_data, int private_data_len) 4563 { 4564 struct ib_cm_sidr_rep_param rep; 4565 int ret; 4566 4567 memset(&rep, 0, sizeof rep); 4568 rep.status = status; 4569 if (status == IB_SIDR_SUCCESS) { 4570 ret = cma_set_qkey(id_priv, qkey); 4571 if (ret) 4572 return ret; 4573 rep.qp_num = id_priv->qp_num; 4574 rep.qkey = id_priv->qkey; 4575 4576 rep.ece.vendor_id = id_priv->ece.vendor_id; 4577 rep.ece.attr_mod = id_priv->ece.attr_mod; 4578 } 4579 4580 rep.private_data = private_data; 4581 rep.private_data_len = private_data_len; 4582 4583 trace_cm_send_sidr_rep(id_priv); 4584 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 4585 } 4586 4587 /** 4588 * rdma_accept - Called to accept a connection request or response. 4589 * @id: Connection identifier associated with the request. 4590 * @conn_param: Information needed to establish the connection. This must be 4591 * provided if accepting a connection request. If accepting a connection 4592 * response, this parameter must be NULL. 4593 * 4594 * Typically, this routine is only called by the listener to accept a connection 4595 * request. It must also be called on the active side of a connection if the 4596 * user is performing their own QP transitions. 4597 * 4598 * In the case of error, a reject message is sent to the remote side and the 4599 * state of the qp associated with the id is modified to error, such that any 4600 * previously posted receive buffers would be flushed. 4601 * 4602 * This function is for use by kernel ULPs and must be called from under the 4603 * handler callback. 
4604 */ 4605 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4606 { 4607 struct rdma_id_private *id_priv = 4608 container_of(id, struct rdma_id_private, id); 4609 int ret; 4610 4611 lockdep_assert_held(&id_priv->handler_mutex); 4612 4613 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 4614 return -EINVAL; 4615 4616 if (!id->qp && conn_param) { 4617 id_priv->qp_num = conn_param->qp_num; 4618 id_priv->srq = conn_param->srq; 4619 } 4620 4621 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4622 if (id->qp_type == IB_QPT_UD) { 4623 if (conn_param) 4624 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 4625 conn_param->qkey, 4626 conn_param->private_data, 4627 conn_param->private_data_len); 4628 else 4629 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 4630 0, NULL, 0); 4631 } else { 4632 if (conn_param) 4633 ret = cma_accept_ib(id_priv, conn_param); 4634 else 4635 ret = cma_rep_recv(id_priv); 4636 } 4637 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4638 ret = cma_accept_iw(id_priv, conn_param); 4639 } else { 4640 ret = -ENOSYS; 4641 } 4642 if (ret) 4643 goto reject; 4644 4645 return 0; 4646 reject: 4647 cma_modify_qp_err(id_priv); 4648 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 4649 return ret; 4650 } 4651 EXPORT_SYMBOL(rdma_accept); 4652 4653 int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4654 struct rdma_ucm_ece *ece) 4655 { 4656 struct rdma_id_private *id_priv = 4657 container_of(id, struct rdma_id_private, id); 4658 4659 id_priv->ece.vendor_id = ece->vendor_id; 4660 id_priv->ece.attr_mod = ece->attr_mod; 4661 4662 return rdma_accept(id, conn_param); 4663 } 4664 EXPORT_SYMBOL(rdma_accept_ece); 4665 4666 void rdma_lock_handler(struct rdma_cm_id *id) 4667 { 4668 struct rdma_id_private *id_priv = 4669 container_of(id, struct rdma_id_private, id); 4670 4671 mutex_lock(&id_priv->handler_mutex); 4672 } 4673 EXPORT_SYMBOL(rdma_lock_handler); 4674 4675 void rdma_unlock_handler(struct rdma_cm_id *id) 4676 { 4677 struct rdma_id_private *id_priv = 4678 container_of(id, struct rdma_id_private, id); 4679 4680 mutex_unlock(&id_priv->handler_mutex); 4681 } 4682 EXPORT_SYMBOL(rdma_unlock_handler); 4683 4684 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 4685 { 4686 struct rdma_id_private *id_priv; 4687 int ret; 4688 4689 id_priv = container_of(id, struct rdma_id_private, id); 4690 if (!id_priv->cm_id.ib) 4691 return -EINVAL; 4692 4693 switch (id->device->node_type) { 4694 case RDMA_NODE_IB_CA: 4695 ret = ib_cm_notify(id_priv->cm_id.ib, event); 4696 break; 4697 default: 4698 ret = 0; 4699 break; 4700 } 4701 return ret; 4702 } 4703 EXPORT_SYMBOL(rdma_notify); 4704 4705 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 4706 u8 private_data_len, u8 reason) 4707 { 4708 struct rdma_id_private *id_priv; 4709 int ret; 4710 4711 id_priv = container_of(id, struct rdma_id_private, id); 4712 if (!id_priv->cm_id.ib) 4713 return -EINVAL; 4714 4715 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4716 if (id->qp_type == IB_QPT_UD) { 4717 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 4718 private_data, private_data_len); 4719 } else { 4720 trace_cm_send_rej(id_priv); 4721 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, 4722 private_data, private_data_len); 4723 } 4724 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4725 ret = iw_cm_reject(id_priv->cm_id.iw, 4726 private_data, private_data_len); 4727 } else { 4728 ret = -ENOSYS; 4729 } 4730 4731 return ret; 4732 } 4733 
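/*
 * Usage sketch (illustrative only, not part of this module): how a
 * hypothetical kernel ULP's connect-request handler might accept or reject
 * an incoming request. The handler is already invoked under the
 * handler_mutex, so rdma_accept()/rdma_reject() may be called directly from
 * it, as the kernel-doc above requires. my_want_conn(), my_pd and
 * my_init_attr are assumed ULP-side names, not symbols defined in this file.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param param = {};
 *		int ret;
 *
 *		if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return 0;
 *
 *		if (!my_want_conn(event)) {
 *			rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
 *			return -ECONNREFUSED;	(non-zero: the core destroys the new id)
 *		}
 *
 *		ret = rdma_create_qp(id, my_pd, &my_init_attr);
 *		if (ret)
 *			return ret;
 *
 *		param.responder_resources = event->param.conn.responder_resources;
 *		param.initiator_depth = event->param.conn.initiator_depth;
 *		return rdma_accept(id, &param);
 *	}
 */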
EXPORT_SYMBOL(rdma_reject); 4734 4735 int rdma_disconnect(struct rdma_cm_id *id) 4736 { 4737 struct rdma_id_private *id_priv; 4738 int ret; 4739 4740 id_priv = container_of(id, struct rdma_id_private, id); 4741 if (!id_priv->cm_id.ib) 4742 return -EINVAL; 4743 4744 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4745 ret = cma_modify_qp_err(id_priv); 4746 if (ret) 4747 goto out; 4748 /* Initiate or respond to a disconnect. */ 4749 trace_cm_disconnect(id_priv); 4750 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { 4751 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) 4752 trace_cm_sent_drep(id_priv); 4753 } else { 4754 trace_cm_sent_dreq(id_priv); 4755 } 4756 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4757 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 4758 } else 4759 ret = -EINVAL; 4760 4761 out: 4762 return ret; 4763 } 4764 EXPORT_SYMBOL(rdma_disconnect); 4765 4766 static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, 4767 struct ib_sa_multicast *multicast, 4768 struct rdma_cm_event *event, 4769 struct cma_multicast *mc) 4770 { 4771 struct rdma_dev_addr *dev_addr; 4772 enum ib_gid_type gid_type; 4773 struct net_device *ndev; 4774 4775 if (!status) 4776 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 4777 else 4778 pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", 4779 status); 4780 4781 event->status = status; 4782 event->param.ud.private_data = mc->context; 4783 if (status) { 4784 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; 4785 return; 4786 } 4787 4788 dev_addr = &id_priv->id.route.addr.dev_addr; 4789 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 4790 gid_type = 4791 id_priv->cma_dev 4792 ->default_gid_type[id_priv->id.port_num - 4793 rdma_start_port( 4794 id_priv->cma_dev->device)]; 4795 4796 event->event = RDMA_CM_EVENT_MULTICAST_JOIN; 4797 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, 4798 &multicast->rec, ndev, gid_type, 4799 &event->param.ud.ah_attr)) { 4800 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; 4801 goto out; 4802 } 4803 4804 event->param.ud.qp_num = 0xFFFFFF; 4805 event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 4806 4807 out: 4808 if (ndev) 4809 dev_put(ndev); 4810 } 4811 4812 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 4813 { 4814 struct cma_multicast *mc = multicast->context; 4815 struct rdma_id_private *id_priv = mc->id_priv; 4816 struct rdma_cm_event event = {}; 4817 int ret = 0; 4818 4819 mutex_lock(&id_priv->handler_mutex); 4820 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || 4821 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) 4822 goto out; 4823 4824 cma_make_mc_event(status, id_priv, multicast, &event, mc); 4825 ret = cma_cm_event_handler(id_priv, &event); 4826 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4827 WARN_ON(ret); 4828 4829 out: 4830 mutex_unlock(&id_priv->handler_mutex); 4831 return 0; 4832 } 4833 4834 static void cma_set_mgid(struct rdma_id_private *id_priv, 4835 struct sockaddr *addr, union ib_gid *mgid) 4836 { 4837 unsigned char mc_map[MAX_ADDR_LEN]; 4838 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4839 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 4840 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 4841 4842 if (cma_any_addr(addr)) { 4843 memset(mgid, 0, sizeof *mgid); 4844 } else if ((addr->sa_family == AF_INET6) && 4845 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 4846 0xFF10A01B)) { 4847 /* IPv6 
address is an SA assigned MGID. */ 4848 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 4849 } else if (addr->sa_family == AF_IB) { 4850 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 4851 } else if (addr->sa_family == AF_INET6) { 4852 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 4853 if (id_priv->id.ps == RDMA_PS_UDP) 4854 mc_map[7] = 0x01; /* Use RDMA CM signature */ 4855 *mgid = *(union ib_gid *) (mc_map + 4); 4856 } else { 4857 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 4858 if (id_priv->id.ps == RDMA_PS_UDP) 4859 mc_map[7] = 0x01; /* Use RDMA CM signature */ 4860 *mgid = *(union ib_gid *) (mc_map + 4); 4861 } 4862 } 4863 4864 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 4865 struct cma_multicast *mc) 4866 { 4867 struct ib_sa_mcmember_rec rec; 4868 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4869 ib_sa_comp_mask comp_mask; 4870 int ret; 4871 4872 ib_addr_get_mgid(dev_addr, &rec.mgid); 4873 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 4874 &rec.mgid, &rec); 4875 if (ret) 4876 return ret; 4877 4878 ret = cma_set_qkey(id_priv, 0); 4879 if (ret) 4880 return ret; 4881 4882 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 4883 rec.qkey = cpu_to_be32(id_priv->qkey); 4884 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 4885 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 4886 rec.join_state = mc->join_state; 4887 4888 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 4889 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 4890 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 4891 IB_SA_MCMEMBER_REC_FLOW_LABEL | 4892 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 4893 4894 if (id_priv->id.ps == RDMA_PS_IPOIB) 4895 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 4896 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 4897 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 4898 IB_SA_MCMEMBER_REC_MTU | 4899 IB_SA_MCMEMBER_REC_HOP_LIMIT; 4900 4901 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, 4902 id_priv->id.port_num, &rec, comp_mask, 4903 GFP_KERNEL, cma_ib_mc_handler, mc); 4904 return PTR_ERR_OR_ZERO(mc->sa_mc); 4905 } 4906 4907 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, 4908 enum ib_gid_type gid_type) 4909 { 4910 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 4911 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 4912 4913 if (cma_any_addr(addr)) { 4914 memset(mgid, 0, sizeof *mgid); 4915 } else if (addr->sa_family == AF_INET6) { 4916 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 4917 } else { 4918 mgid->raw[0] = 4919 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; 4920 mgid->raw[1] = 4921 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
0 : 0x0e; 4922 mgid->raw[2] = 0; 4923 mgid->raw[3] = 0; 4924 mgid->raw[4] = 0; 4925 mgid->raw[5] = 0; 4926 mgid->raw[6] = 0; 4927 mgid->raw[7] = 0; 4928 mgid->raw[8] = 0; 4929 mgid->raw[9] = 0; 4930 mgid->raw[10] = 0xff; 4931 mgid->raw[11] = 0xff; 4932 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 4933 } 4934 } 4935 4936 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 4937 struct cma_multicast *mc) 4938 { 4939 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4940 int err = 0; 4941 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 4942 struct net_device *ndev = NULL; 4943 struct ib_sa_multicast ib; 4944 enum ib_gid_type gid_type; 4945 bool send_only; 4946 4947 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 4948 4949 if (cma_zero_addr(addr)) 4950 return -EINVAL; 4951 4952 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 4953 rdma_start_port(id_priv->cma_dev->device)]; 4954 cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); 4955 4956 ib.rec.pkey = cpu_to_be16(0xffff); 4957 if (id_priv->id.ps == RDMA_PS_UDP) 4958 ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 4959 4960 if (dev_addr->bound_dev_if) 4961 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 4962 if (!ndev) 4963 return -ENODEV; 4964 4965 ib.rec.rate = iboe_get_rate(ndev); 4966 ib.rec.hop_limit = 1; 4967 ib.rec.mtu = iboe_get_mtu(ndev->mtu); 4968 4969 if (addr->sa_family == AF_INET) { 4970 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 4971 ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; 4972 if (!send_only) { 4973 err = cma_igmp_send(ndev, &ib.rec.mgid, 4974 true); 4975 } 4976 } 4977 } else { 4978 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 4979 err = -ENOTSUPP; 4980 } 4981 dev_put(ndev); 4982 if (err || !ib.rec.mtu) 4983 return err ?: -EINVAL; 4984 4985 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 4986 &ib.rec.port_gid); 4987 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); 4988 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); 4989 queue_work(cma_wq, &mc->iboe_join.work); 4990 return 0; 4991 } 4992 4993 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 4994 u8 join_state, void *context) 4995 { 4996 struct rdma_id_private *id_priv = 4997 container_of(id, struct rdma_id_private, id); 4998 struct cma_multicast *mc; 4999 int ret; 5000 5001 /* Not supported for kernel QPs */ 5002 if (WARN_ON(id->qp)) 5003 return -EINVAL; 5004 5005 /* ULP is calling this wrong. 
*/ 5006 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && 5007 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) 5008 return -EINVAL; 5009 5010 mc = kzalloc(sizeof(*mc), GFP_KERNEL); 5011 if (!mc) 5012 return -ENOMEM; 5013 5014 memcpy(&mc->addr, addr, rdma_addr_size(addr)); 5015 mc->context = context; 5016 mc->id_priv = id_priv; 5017 mc->join_state = join_state; 5018 5019 if (rdma_protocol_roce(id->device, id->port_num)) { 5020 ret = cma_iboe_join_multicast(id_priv, mc); 5021 if (ret) 5022 goto out_err; 5023 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { 5024 ret = cma_join_ib_multicast(id_priv, mc); 5025 if (ret) 5026 goto out_err; 5027 } else { 5028 ret = -ENOSYS; 5029 goto out_err; 5030 } 5031 5032 spin_lock(&id_priv->lock); 5033 list_add(&mc->list, &id_priv->mc_list); 5034 spin_unlock(&id_priv->lock); 5035 5036 return 0; 5037 out_err: 5038 kfree(mc); 5039 return ret; 5040 } 5041 EXPORT_SYMBOL(rdma_join_multicast); 5042 5043 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 5044 { 5045 struct rdma_id_private *id_priv; 5046 struct cma_multicast *mc; 5047 5048 id_priv = container_of(id, struct rdma_id_private, id); 5049 spin_lock_irq(&id_priv->lock); 5050 list_for_each_entry(mc, &id_priv->mc_list, list) { 5051 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) 5052 continue; 5053 list_del(&mc->list); 5054 spin_unlock_irq(&id_priv->lock); 5055 5056 WARN_ON(id_priv->cma_dev->device != id->device); 5057 destroy_mc(id_priv, mc); 5058 return; 5059 } 5060 spin_unlock_irq(&id_priv->lock); 5061 } 5062 EXPORT_SYMBOL(rdma_leave_multicast); 5063 5064 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 5065 { 5066 struct rdma_dev_addr *dev_addr; 5067 struct cma_work *work; 5068 5069 dev_addr = &id_priv->id.route.addr.dev_addr; 5070 5071 if ((dev_addr->bound_dev_if == ndev->ifindex) && 5072 (net_eq(dev_net(ndev), dev_addr->net)) && 5073 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 5074 pr_info("RDMA CM addr change for ndev %s used by id %p\n", 5075 ndev->name, &id_priv->id); 5076 work = kzalloc(sizeof *work, GFP_KERNEL); 5077 if (!work) 5078 return -ENOMEM; 5079 5080 INIT_WORK(&work->work, cma_work_handler); 5081 work->id = id_priv; 5082 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 5083 cma_id_get(id_priv); 5084 queue_work(cma_wq, &work->work); 5085 } 5086 5087 return 0; 5088 } 5089 5090 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 5091 void *ptr) 5092 { 5093 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 5094 struct cma_device *cma_dev; 5095 struct rdma_id_private *id_priv; 5096 int ret = NOTIFY_DONE; 5097 5098 if (event != NETDEV_BONDING_FAILOVER) 5099 return NOTIFY_DONE; 5100 5101 if (!netif_is_bond_master(ndev)) 5102 return NOTIFY_DONE; 5103 5104 mutex_lock(&lock); 5105 list_for_each_entry(cma_dev, &dev_list, list) 5106 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { 5107 ret = cma_netdev_change(ndev, id_priv); 5108 if (ret) 5109 goto out; 5110 } 5111 5112 out: 5113 mutex_unlock(&lock); 5114 return ret; 5115 } 5116 5117 static void cma_netevent_work_handler(struct work_struct *_work) 5118 { 5119 struct rdma_id_private *id_priv = 5120 container_of(_work, struct rdma_id_private, id.net_work); 5121 struct rdma_cm_event event = {}; 5122 5123 mutex_lock(&id_priv->handler_mutex); 5124 5125 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 5126 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 5127 goto out_unlock; 
5128 
5129 	event.event = RDMA_CM_EVENT_UNREACHABLE;
5130 	event.status = -ETIMEDOUT;
5131 
5132 	if (cma_cm_event_handler(id_priv, &event)) {
5133 		__acquire(&id_priv->handler_mutex);
5134 		id_priv->cm_id.ib = NULL;
5135 		cma_id_put(id_priv);
5136 		destroy_id_handler_unlock(id_priv);
5137 		return;
5138 	}
5139 
5140 out_unlock:
5141 	mutex_unlock(&id_priv->handler_mutex);
5142 	cma_id_put(id_priv);
5143 }
5144 
5145 static int cma_netevent_callback(struct notifier_block *self,
5146 				 unsigned long event, void *ctx)
5147 {
5148 	struct id_table_entry *ips_node = NULL;
5149 	struct rdma_id_private *current_id;
5150 	struct neighbour *neigh = ctx;
5151 	unsigned long flags;
5152 
5153 	if (event != NETEVENT_NEIGH_UPDATE)
5154 		return NOTIFY_DONE;
5155 
5156 	spin_lock_irqsave(&id_table_lock, flags);
5157 	if (neigh->tbl->family == AF_INET6) {
5158 		struct sockaddr_in6 neigh_sock_6;
5159 
5160 		neigh_sock_6.sin6_family = AF_INET6;
5161 		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
5162 		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5163 					     (struct sockaddr *)&neigh_sock_6);
5164 	} else if (neigh->tbl->family == AF_INET) {
5165 		struct sockaddr_in neigh_sock_4;
5166 
5167 		neigh_sock_4.sin_family = AF_INET;
5168 		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
5169 		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5170 					     (struct sockaddr *)&neigh_sock_4);
5171 	} else
5172 		goto out;
5173 
5174 	if (!ips_node)
5175 		goto out;
5176 
5177 	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
5178 		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5179 			    neigh->ha, ETH_ALEN))
5180 			continue;
5181 		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5182 		cma_id_get(current_id);
5183 		queue_work(cma_wq, &current_id->id.net_work);
5184 	}
5185 out:
5186 	spin_unlock_irqrestore(&id_table_lock, flags);
5187 	return NOTIFY_DONE;
5188 }
5189 
5190 static struct notifier_block cma_nb = {
5191 	.notifier_call = cma_netdev_callback
5192 };
5193 
5194 static struct notifier_block cma_netevent_cb = {
5195 	.notifier_call = cma_netevent_callback
5196 };
5197 
5198 static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
5199 {
5200 	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
5201 	enum rdma_cm_state state;
5202 	unsigned long flags;
5203 
5204 	mutex_lock(&id_priv->handler_mutex);
5205 	/* Record that we want to remove the device */
5206 	spin_lock_irqsave(&id_priv->lock, flags);
5207 	state = id_priv->state;
5208 	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
5209 		spin_unlock_irqrestore(&id_priv->lock, flags);
5210 		mutex_unlock(&id_priv->handler_mutex);
5211 		cma_id_put(id_priv);
5212 		return;
5213 	}
5214 	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
5215 	spin_unlock_irqrestore(&id_priv->lock, flags);
5216 
5217 	if (cma_cm_event_handler(id_priv, &event)) {
5218 		/*
5219 		 * At this point the ULP promises it won't call
5220 		 * rdma_destroy_id() concurrently
5221 		 */
5222 		cma_id_put(id_priv);
5223 		mutex_unlock(&id_priv->handler_mutex);
5224 		trace_cm_id_destroy(id_priv);
5225 		_destroy_id(id_priv, state);
5226 		return;
5227 	}
5228 	mutex_unlock(&id_priv->handler_mutex);
5229 
5230 	/*
5231 	 * If this races with destroy then the thread that first assigns state
5232 	 * to a destroying does the cancel.
5233 */ 5234 cma_cancel_operation(id_priv, state); 5235 cma_id_put(id_priv); 5236 } 5237 5238 static void cma_process_remove(struct cma_device *cma_dev) 5239 { 5240 mutex_lock(&lock); 5241 while (!list_empty(&cma_dev->id_list)) { 5242 struct rdma_id_private *id_priv = list_first_entry( 5243 &cma_dev->id_list, struct rdma_id_private, device_item); 5244 5245 list_del_init(&id_priv->listen_item); 5246 list_del_init(&id_priv->device_item); 5247 cma_id_get(id_priv); 5248 mutex_unlock(&lock); 5249 5250 cma_send_device_removal_put(id_priv); 5251 5252 mutex_lock(&lock); 5253 } 5254 mutex_unlock(&lock); 5255 5256 cma_dev_put(cma_dev); 5257 wait_for_completion(&cma_dev->comp); 5258 } 5259 5260 static bool cma_supported(struct ib_device *device) 5261 { 5262 u32 i; 5263 5264 rdma_for_each_port(device, i) { 5265 if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) 5266 return true; 5267 } 5268 return false; 5269 } 5270 5271 static int cma_add_one(struct ib_device *device) 5272 { 5273 struct rdma_id_private *to_destroy; 5274 struct cma_device *cma_dev; 5275 struct rdma_id_private *id_priv; 5276 unsigned long supported_gids = 0; 5277 int ret; 5278 u32 i; 5279 5280 if (!cma_supported(device)) 5281 return -EOPNOTSUPP; 5282 5283 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); 5284 if (!cma_dev) 5285 return -ENOMEM; 5286 5287 cma_dev->device = device; 5288 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 5289 sizeof(*cma_dev->default_gid_type), 5290 GFP_KERNEL); 5291 if (!cma_dev->default_gid_type) { 5292 ret = -ENOMEM; 5293 goto free_cma_dev; 5294 } 5295 5296 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, 5297 sizeof(*cma_dev->default_roce_tos), 5298 GFP_KERNEL); 5299 if (!cma_dev->default_roce_tos) { 5300 ret = -ENOMEM; 5301 goto free_gid_type; 5302 } 5303 5304 rdma_for_each_port (device, i) { 5305 supported_gids = roce_gid_type_mask_support(device, i); 5306 WARN_ON(!supported_gids); 5307 if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) 5308 cma_dev->default_gid_type[i - rdma_start_port(device)] = 5309 CMA_PREFERRED_ROCE_GID_TYPE; 5310 else 5311 cma_dev->default_gid_type[i - rdma_start_port(device)] = 5312 find_first_bit(&supported_gids, BITS_PER_LONG); 5313 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; 5314 } 5315 5316 init_completion(&cma_dev->comp); 5317 refcount_set(&cma_dev->refcount, 1); 5318 INIT_LIST_HEAD(&cma_dev->id_list); 5319 ib_set_client_data(device, &cma_client, cma_dev); 5320 5321 mutex_lock(&lock); 5322 list_add_tail(&cma_dev->list, &dev_list); 5323 list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { 5324 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 5325 if (ret) 5326 goto free_listen; 5327 } 5328 mutex_unlock(&lock); 5329 5330 trace_cm_add_one(device); 5331 return 0; 5332 5333 free_listen: 5334 list_del(&cma_dev->list); 5335 mutex_unlock(&lock); 5336 5337 /* cma_process_remove() will delete to_destroy */ 5338 cma_process_remove(cma_dev); 5339 kfree(cma_dev->default_roce_tos); 5340 free_gid_type: 5341 kfree(cma_dev->default_gid_type); 5342 5343 free_cma_dev: 5344 kfree(cma_dev); 5345 return ret; 5346 } 5347 5348 static void cma_remove_one(struct ib_device *device, void *client_data) 5349 { 5350 struct cma_device *cma_dev = client_data; 5351 5352 trace_cm_remove_one(device); 5353 5354 mutex_lock(&lock); 5355 list_del(&cma_dev->list); 5356 mutex_unlock(&lock); 5357 5358 cma_process_remove(cma_dev); 5359 kfree(cma_dev->default_roce_tos); 5360 kfree(cma_dev->default_gid_type); 5361 kfree(cma_dev); 5362 } 5363 5364 
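/*
 * Usage sketch (illustrative only, not part of this module): how a
 * hypothetical ULP event handler might react to the
 * RDMA_CM_EVENT_DEVICE_REMOVAL event that cma_send_device_removal_put()
 * above delivers. Returning non-zero asks the core to destroy the
 * rdma_cm_id on the ULP's behalf; returning zero means the ULP will call
 * rdma_destroy_id() itself later, from outside the handler.
 * my_drain_and_cleanup() is an assumed ULP-side helper.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
 *			my_drain_and_cleanup(id->context);
 *			return 1;	(let rdma_cm destroy the id)
 *		}
 *		return 0;
 *	}
 */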
static int cma_init_net(struct net *net) 5365 { 5366 struct cma_pernet *pernet = cma_pernet(net); 5367 5368 xa_init(&pernet->tcp_ps); 5369 xa_init(&pernet->udp_ps); 5370 xa_init(&pernet->ipoib_ps); 5371 xa_init(&pernet->ib_ps); 5372 5373 return 0; 5374 } 5375 5376 static void cma_exit_net(struct net *net) 5377 { 5378 struct cma_pernet *pernet = cma_pernet(net); 5379 5380 WARN_ON(!xa_empty(&pernet->tcp_ps)); 5381 WARN_ON(!xa_empty(&pernet->udp_ps)); 5382 WARN_ON(!xa_empty(&pernet->ipoib_ps)); 5383 WARN_ON(!xa_empty(&pernet->ib_ps)); 5384 } 5385 5386 static struct pernet_operations cma_pernet_operations = { 5387 .init = cma_init_net, 5388 .exit = cma_exit_net, 5389 .id = &cma_pernet_id, 5390 .size = sizeof(struct cma_pernet), 5391 }; 5392 5393 static int __init cma_init(void) 5394 { 5395 int ret; 5396 5397 /* 5398 * There is a rare lock ordering dependency in cma_netdev_callback() 5399 * that only happens when bonding is enabled. Teach lockdep that rtnl 5400 * must never be nested under lock so it can find these without having 5401 * to test with bonding. 5402 */ 5403 if (IS_ENABLED(CONFIG_LOCKDEP)) { 5404 rtnl_lock(); 5405 mutex_lock(&lock); 5406 mutex_unlock(&lock); 5407 rtnl_unlock(); 5408 } 5409 5410 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); 5411 if (!cma_wq) 5412 return -ENOMEM; 5413 5414 ret = register_pernet_subsys(&cma_pernet_operations); 5415 if (ret) 5416 goto err_wq; 5417 5418 ib_sa_register_client(&sa_client); 5419 register_netdevice_notifier(&cma_nb); 5420 register_netevent_notifier(&cma_netevent_cb); 5421 5422 ret = ib_register_client(&cma_client); 5423 if (ret) 5424 goto err; 5425 5426 ret = cma_configfs_init(); 5427 if (ret) 5428 goto err_ib; 5429 5430 return 0; 5431 5432 err_ib: 5433 ib_unregister_client(&cma_client); 5434 err: 5435 unregister_netevent_notifier(&cma_netevent_cb); 5436 unregister_netdevice_notifier(&cma_nb); 5437 ib_sa_unregister_client(&sa_client); 5438 unregister_pernet_subsys(&cma_pernet_operations); 5439 err_wq: 5440 destroy_workqueue(cma_wq); 5441 return ret; 5442 } 5443 5444 static void __exit cma_cleanup(void) 5445 { 5446 cma_configfs_exit(); 5447 ib_unregister_client(&cma_client); 5448 unregister_netevent_notifier(&cma_netevent_cb); 5449 unregister_netdevice_notifier(&cma_nb); 5450 ib_sa_unregister_client(&sa_client); 5451 unregister_pernet_subsys(&cma_pernet_operations); 5452 destroy_workqueue(cma_wq); 5453 } 5454 5455 module_init(cma_init); 5456 module_exit(cma_cleanup); 5457
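/*
 * Usage sketch (illustrative only, not part of this module): the active-side
 * calling sequence a hypothetical kernel ULP drives through the entry points
 * exported above. Everything after rdma_resolve_addr() is event driven and
 * happens from the cm event handler. my_handler, my_ctx, my_pd, my_attr and
 * my_param are assumed ULP-side names.
 *
 *	id = rdma_create_id(&init_net, my_handler, my_ctx, RDMA_PS_TCP,
 *			    IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 *
 *	Then, in my_handler():
 *	  RDMA_CM_EVENT_ADDR_RESOLVED  -> rdma_resolve_route(id, 2000);
 *	  RDMA_CM_EVENT_ROUTE_RESOLVED -> rdma_create_qp(id, my_pd, &my_attr);
 *	                                  rdma_connect_locked(id, &my_param);
 *	  RDMA_CM_EVENT_ESTABLISHED    -> the connection is ready for I/O
 *	  on teardown                  -> rdma_disconnect(id), followed by
 *	                                  rdma_destroy_id(id) outside the handler
 */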