/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

enum {
	CMA_OPTION_AFONLY,
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal
 * with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u32			options;
	u8			srq;
	u8			tos;
	u8			reuseaddr;
	u8			afonly;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
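	/* The CMA header keeps the IP version in the high nibble of ip_version. */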
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
{
	int i;
	int err;
	struct ib_port_attr props;
	union ib_gid tmp;

	err = ib_query_port(device, port_num, &props);
	if (err)
		return err;

	for (i = 0; i < props.gid_tbl_len; ++i) {
		err = ib_query_gid(device, port_num, i, &tmp);
		if (err)
			return err;
		if (!memcmp(&tmp, gid, sizeof tmp))
			return 0;
	}

	return -EADDRNOTAVAIL;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	int ret = -ENODEV;
	u8 port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	iboe_addr_get_sgid(dev_addr, &iboe_gid);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
				else
					ret = find_gid_port(cma_dev->device, &gid, port);

				if (!ret) {
					id_priv->id.port_num = port;
					goto out;
				}
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;
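	/*
	 * Creates the QP on the caller's PD, then moves it out of RESET:
	 * UD QPs are taken all the way to RTS, connected QPs only to INIT.
	 */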

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret =
		    cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	default:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	}
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver = sdp_get_ip_ver(hdr);
		*port = ((struct sdp_hh *) hdr)->port;
		*src = &((struct sdp_hh *) hdr)->src_addr;
		*dst = &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver = cma_get_ip_ver(hdr);
		*port = ((struct cma_hdr *) hdr)->port;
		*src = &((struct cma_hdr *) hdr)->src_addr;
		*dst = &((struct
			  cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ?
				      RDMA_CM_EVENT_CONNECT_ERROR :
				      RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		return NULL;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ?
			2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
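	/* SIDR (UD) requests and connection requests build different new ids. */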
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;

	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			if (!cma_any_addr(addr)) {
				cma_data->dst_addr.ip4.addr = ip4_addr;
				cma_mask->dst_addr.ip4.addr = htonl(~0);
			}
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			if
			   (!cma_any_addr(addr)) {
				cma_data->dst_addr.ip6 = ip6_addr;
				memset(&cma_mask->dst_addr.ip6, 0xFF,
				       sizeof cma_mask->dst_addr.ip6);
			}
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value.
		 */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	struct ib_cm_id *id;
	__be64 svc_id;
	int ret;

	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.ib = id;

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;
	struct iw_cm_id *id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.iw = id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

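	/* Add a listener on every known device, under the global device lock. */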
	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ?
		id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
	struct net_device *ndev = NULL;
	u16 vid;

	if (src_addr->sin_family != dst_addr->sin_family)
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	vid = rdma_vlan_dev_vlan_id(ndev);

	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = netdev_get_prio_tc_map(
			ndev->priv_flags & IFF_802_1Q_VLAN ?
				vlan_dev_real_dev(ndev) : ndev,
			rt_tos2priority(id_priv->tos));

	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid
		     gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr *src, *dst;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
		if ((src->sa_family = dst->sa_family) == AF_INET) {
			((struct sockaddr_in *)src)->sin_addr =
				((struct sockaddr_in *)dst)->sin_addr;
		} else {
			((struct sockaddr_in6 *)src)->sin6_addr =
				((struct sockaddr_in6 *)dst)->sin6_addr;
		}
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static
int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2045 struct sockaddr *dst_addr) 2046 { 2047 if (!src_addr || !src_addr->sa_family) { 2048 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2049 if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) { 2050 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id = 2051 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id; 2052 } 2053 } 2054 return rdma_bind_addr(id, src_addr); 2055 } 2056 2057 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2058 struct sockaddr *dst_addr, int timeout_ms) 2059 { 2060 struct rdma_id_private *id_priv; 2061 int ret; 2062 2063 id_priv = container_of(id, struct rdma_id_private, id); 2064 if (id_priv->state == RDMA_CM_IDLE) { 2065 ret = cma_bind_addr(id, src_addr, dst_addr); 2066 if (ret) 2067 return ret; 2068 } 2069 2070 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2071 return -EINVAL; 2072 2073 atomic_inc(&id_priv->refcount); 2074 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr)); 2075 if (cma_any_addr(dst_addr)) 2076 ret = cma_resolve_loopback(id_priv); 2077 else 2078 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr, 2079 dst_addr, &id->route.addr.dev_addr, 2080 timeout_ms, addr_handler, id_priv); 2081 if (ret) 2082 goto err; 2083 2084 return 0; 2085 err: 2086 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2087 cma_deref_id(id_priv); 2088 return ret; 2089 } 2090 EXPORT_SYMBOL(rdma_resolve_addr); 2091 2092 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2093 { 2094 struct rdma_id_private *id_priv; 2095 unsigned long flags; 2096 int ret; 2097 2098 id_priv = container_of(id, struct rdma_id_private, id); 2099 spin_lock_irqsave(&id_priv->lock, flags); 2100 if (id_priv->state == RDMA_CM_IDLE) { 2101 id_priv->reuseaddr = reuse; 2102 ret = 0; 2103 } else { 2104 ret = -EINVAL; 2105 } 2106 spin_unlock_irqrestore(&id_priv->lock, flags); 2107 return ret; 2108 } 2109 EXPORT_SYMBOL(rdma_set_reuseaddr); 2110 2111 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2112 { 2113 struct rdma_id_private *id_priv; 2114 unsigned long flags; 2115 int ret; 2116 2117 id_priv = container_of(id, struct rdma_id_private, id); 2118 spin_lock_irqsave(&id_priv->lock, flags); 2119 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2120 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2121 id_priv->afonly = afonly; 2122 ret = 0; 2123 } else { 2124 ret = -EINVAL; 2125 } 2126 spin_unlock_irqrestore(&id_priv->lock, flags); 2127 return ret; 2128 } 2129 EXPORT_SYMBOL(rdma_set_afonly); 2130 2131 static void cma_bind_port(struct rdma_bind_list *bind_list, 2132 struct rdma_id_private *id_priv) 2133 { 2134 struct sockaddr_in *sin; 2135 2136 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2137 sin->sin_port = htons(bind_list->port); 2138 id_priv->bind_list = bind_list; 2139 hlist_add_head(&id_priv->node, &bind_list->owners); 2140 } 2141 2142 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, 2143 unsigned short snum) 2144 { 2145 struct rdma_bind_list *bind_list; 2146 int port, ret; 2147 2148 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2149 if (!bind_list) 2150 return -ENOMEM; 2151 2152 do { 2153 ret = idr_get_new_above(ps, bind_list, snum, &port); 2154 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2155 2156 if (ret) 2157 goto err1; 2158 2159 if (port != snum) { 2160 ret = -EADDRNOTAVAIL; 2161 goto err2; 2162 } 2163 2164 
bind_list->ps = ps; 2165 bind_list->port = (unsigned short) port; 2166 cma_bind_port(bind_list, id_priv); 2167 return 0; 2168 err2: 2169 idr_remove(ps, port); 2170 err1: 2171 kfree(bind_list); 2172 return ret; 2173 } 2174 2175 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 2176 { 2177 static unsigned int last_used_port; 2178 int low, high, remaining; 2179 unsigned int rover; 2180 2181 inet_get_local_port_range(&low, &high); 2182 remaining = (high - low) + 1; 2183 rover = net_random() % remaining + low; 2184 retry: 2185 if (last_used_port != rover && 2186 !idr_find(ps, (unsigned short) rover)) { 2187 int ret = cma_alloc_port(ps, id_priv, rover); 2188 /* 2189 * Remember previously used port number in order to avoid 2190 * re-using same port immediately after it is closed. 2191 */ 2192 if (!ret) 2193 last_used_port = rover; 2194 if (ret != -EADDRNOTAVAIL) 2195 return ret; 2196 } 2197 if (--remaining) { 2198 rover++; 2199 if ((rover < low) || (rover > high)) 2200 rover = low; 2201 goto retry; 2202 } 2203 return -EADDRNOTAVAIL; 2204 } 2205 2206 /* 2207 * Check that the requested port is available. This is called when trying to 2208 * bind to a specific port, or when trying to listen on a bound port. In 2209 * the latter case, the provided id_priv may already be on the bind_list, but 2210 * we still need to check that it's okay to start listening. 2211 */ 2212 static int cma_check_port(struct rdma_bind_list *bind_list, 2213 struct rdma_id_private *id_priv, uint8_t reuseaddr) 2214 { 2215 struct rdma_id_private *cur_id; 2216 struct sockaddr *addr, *cur_addr; 2217 struct hlist_node *node; 2218 2219 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; 2220 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2221 if (id_priv == cur_id) 2222 continue; 2223 2224 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 2225 cur_id->reuseaddr) 2226 continue; 2227 2228 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; 2229 if (id_priv->afonly && cur_id->afonly && 2230 (addr->sa_family != cur_addr->sa_family)) 2231 continue; 2232 2233 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 2234 return -EADDRNOTAVAIL; 2235 2236 if (!cma_addr_cmp(addr, cur_addr)) 2237 return -EADDRINUSE; 2238 } 2239 return 0; 2240 } 2241 2242 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2243 { 2244 struct rdma_bind_list *bind_list; 2245 unsigned short snum; 2246 int ret; 2247 2248 snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 2249 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2250 return -EACCES; 2251 2252 bind_list = idr_find(ps, snum); 2253 if (!bind_list) { 2254 ret = cma_alloc_port(ps, id_priv, snum); 2255 } else { 2256 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 2257 if (!ret) 2258 cma_bind_port(bind_list, id_priv); 2259 } 2260 return ret; 2261 } 2262 2263 static int cma_bind_listen(struct rdma_id_private *id_priv) 2264 { 2265 struct rdma_bind_list *bind_list = id_priv->bind_list; 2266 int ret = 0; 2267 2268 mutex_lock(&lock); 2269 if (bind_list->owners.first->next) 2270 ret = cma_check_port(bind_list, id_priv, 0); 2271 mutex_unlock(&lock); 2272 return ret; 2273 } 2274 2275 static int cma_get_port(struct rdma_id_private *id_priv) 2276 { 2277 struct idr *ps; 2278 int ret; 2279 2280 switch (id_priv->id.ps) { 2281 case RDMA_PS_SDP: 2282 ps = &sdp_ps; 2283 break; 2284 case RDMA_PS_TCP: 2285 ps = &tcp_ps; 2286 break; 2287 case RDMA_PS_UDP: 2288 ps = &udp_ps; 2289 break; 2290 case 
RDMA_PS_IPOIB: 2291 ps = &ipoib_ps; 2292 break; 2293 case RDMA_PS_IB: 2294 ps = &ib_ps; 2295 break; 2296 default: 2297 return -EPROTONOSUPPORT; 2298 } 2299 2300 mutex_lock(&lock); 2301 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)) 2302 ret = cma_alloc_any_port(ps, id_priv); 2303 else 2304 ret = cma_use_port(ps, id_priv); 2305 mutex_unlock(&lock); 2306 2307 return ret; 2308 } 2309 2310 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 2311 struct sockaddr *addr) 2312 { 2313 #if IS_ENABLED(CONFIG_IPV6) 2314 struct sockaddr_in6 *sin6; 2315 2316 if (addr->sa_family != AF_INET6) 2317 return 0; 2318 2319 sin6 = (struct sockaddr_in6 *) addr; 2320 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 2321 !sin6->sin6_scope_id) 2322 return -EINVAL; 2323 2324 dev_addr->bound_dev_if = sin6->sin6_scope_id; 2325 #endif 2326 return 0; 2327 } 2328 2329 int rdma_listen(struct rdma_cm_id *id, int backlog) 2330 { 2331 struct rdma_id_private *id_priv; 2332 int ret; 2333 2334 id_priv = container_of(id, struct rdma_id_private, id); 2335 if (id_priv->state == RDMA_CM_IDLE) { 2336 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; 2337 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); 2338 if (ret) 2339 return ret; 2340 } 2341 2342 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 2343 return -EINVAL; 2344 2345 if (id_priv->reuseaddr) { 2346 ret = cma_bind_listen(id_priv); 2347 if (ret) 2348 goto err; 2349 } 2350 2351 id_priv->backlog = backlog; 2352 if (id->device) { 2353 switch (rdma_node_get_transport(id->device->node_type)) { 2354 case RDMA_TRANSPORT_IB: 2355 ret = cma_ib_listen(id_priv); 2356 if (ret) 2357 goto err; 2358 break; 2359 case RDMA_TRANSPORT_IWARP: 2360 ret = cma_iw_listen(id_priv, backlog); 2361 if (ret) 2362 goto err; 2363 break; 2364 default: 2365 ret = -ENOSYS; 2366 goto err; 2367 } 2368 } else 2369 cma_listen_on_all(id_priv); 2370 2371 return 0; 2372 err: 2373 id_priv->backlog = 0; 2374 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 2375 return ret; 2376 } 2377 EXPORT_SYMBOL(rdma_listen); 2378 2379 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2380 { 2381 struct rdma_id_private *id_priv; 2382 int ret; 2383 2384 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) 2385 return -EAFNOSUPPORT; 2386 2387 id_priv = container_of(id, struct rdma_id_private, id); 2388 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 2389 return -EINVAL; 2390 2391 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 2392 if (ret) 2393 goto err1; 2394 2395 if (!cma_any_addr(addr)) { 2396 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2397 if (ret) 2398 goto err1; 2399 2400 ret = cma_acquire_dev(id_priv); 2401 if (ret) 2402 goto err1; 2403 } 2404 2405 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); 2406 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 2407 if (addr->sa_family == AF_INET) 2408 id_priv->afonly = 1; 2409 #if IS_ENABLED(CONFIG_IPV6) 2410 else if (addr->sa_family == AF_INET6) 2411 id_priv->afonly = init_net.ipv6.sysctl.bindv6only; 2412 #endif 2413 } 2414 ret = cma_get_port(id_priv); 2415 if (ret) 2416 goto err2; 2417 2418 return 0; 2419 err2: 2420 if (id_priv->cma_dev) 2421 cma_release_dev(id_priv); 2422 err1: 2423 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 2424 return ret; 2425 } 2426 EXPORT_SYMBOL(rdma_bind_addr); 2427 2428 static int cma_format_hdr(void *hdr, enum rdma_port_space ps, 2429 struct 
rdma_route *route) 2430 { 2431 struct cma_hdr *cma_hdr; 2432 struct sdp_hh *sdp_hdr; 2433 2434 if (route->addr.src_addr.ss_family == AF_INET) { 2435 struct sockaddr_in *src4, *dst4; 2436 2437 src4 = (struct sockaddr_in *) &route->addr.src_addr; 2438 dst4 = (struct sockaddr_in *) &route->addr.dst_addr; 2439 2440 switch (ps) { 2441 case RDMA_PS_SDP: 2442 sdp_hdr = hdr; 2443 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2444 return -EINVAL; 2445 sdp_set_ip_ver(sdp_hdr, 4); 2446 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2447 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2448 sdp_hdr->port = src4->sin_port; 2449 break; 2450 default: 2451 cma_hdr = hdr; 2452 cma_hdr->cma_version = CMA_VERSION; 2453 cma_set_ip_ver(cma_hdr, 4); 2454 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2455 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2456 cma_hdr->port = src4->sin_port; 2457 break; 2458 } 2459 } else { 2460 struct sockaddr_in6 *src6, *dst6; 2461 2462 src6 = (struct sockaddr_in6 *) &route->addr.src_addr; 2463 dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; 2464 2465 switch (ps) { 2466 case RDMA_PS_SDP: 2467 sdp_hdr = hdr; 2468 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2469 return -EINVAL; 2470 sdp_set_ip_ver(sdp_hdr, 6); 2471 sdp_hdr->src_addr.ip6 = src6->sin6_addr; 2472 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; 2473 sdp_hdr->port = src6->sin6_port; 2474 break; 2475 default: 2476 cma_hdr = hdr; 2477 cma_hdr->cma_version = CMA_VERSION; 2478 cma_set_ip_ver(cma_hdr, 6); 2479 cma_hdr->src_addr.ip6 = src6->sin6_addr; 2480 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 2481 cma_hdr->port = src6->sin6_port; 2482 break; 2483 } 2484 } 2485 return 0; 2486 } 2487 2488 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 2489 struct ib_cm_event *ib_event) 2490 { 2491 struct rdma_id_private *id_priv = cm_id->context; 2492 struct rdma_cm_event event; 2493 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2494 int ret = 0; 2495 2496 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 2497 return 0; 2498 2499 memset(&event, 0, sizeof event); 2500 switch (ib_event->event) { 2501 case IB_CM_SIDR_REQ_ERROR: 2502 event.event = RDMA_CM_EVENT_UNREACHABLE; 2503 event.status = -ETIMEDOUT; 2504 break; 2505 case IB_CM_SIDR_REP_RECEIVED: 2506 event.param.ud.private_data = ib_event->private_data; 2507 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 2508 if (rep->status != IB_SIDR_SUCCESS) { 2509 event.event = RDMA_CM_EVENT_UNREACHABLE; 2510 event.status = ib_event->param.sidr_rep_rcvd.status; 2511 break; 2512 } 2513 ret = cma_set_qkey(id_priv); 2514 if (ret) { 2515 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2516 event.status = -EINVAL; 2517 break; 2518 } 2519 if (id_priv->qkey != rep->qkey) { 2520 event.event = RDMA_CM_EVENT_UNREACHABLE; 2521 event.status = -EINVAL; 2522 break; 2523 } 2524 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 2525 id_priv->id.route.path_rec, 2526 &event.param.ud.ah_attr); 2527 event.param.ud.qp_num = rep->qpn; 2528 event.param.ud.qkey = rep->qkey; 2529 event.event = RDMA_CM_EVENT_ESTABLISHED; 2530 event.status = 0; 2531 break; 2532 default: 2533 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 2534 ib_event->event); 2535 goto out; 2536 } 2537 2538 ret = id_priv->id.event_handler(&id_priv->id, &event); 2539 if (ret) { 2540 /* Destroy the CM ID by returning a non-zero value. 
*/ 2541 id_priv->cm_id.ib = NULL; 2542 cma_exch(id_priv, RDMA_CM_DESTROYING); 2543 mutex_unlock(&id_priv->handler_mutex); 2544 rdma_destroy_id(&id_priv->id); 2545 return ret; 2546 } 2547 out: 2548 mutex_unlock(&id_priv->handler_mutex); 2549 return ret; 2550 } 2551 2552 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 2553 struct rdma_conn_param *conn_param) 2554 { 2555 struct ib_cm_sidr_req_param req; 2556 struct rdma_route *route; 2557 struct ib_cm_id *id; 2558 int ret; 2559 2560 req.private_data_len = sizeof(struct cma_hdr) + 2561 conn_param->private_data_len; 2562 if (req.private_data_len < conn_param->private_data_len) 2563 return -EINVAL; 2564 2565 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2566 if (!req.private_data) 2567 return -ENOMEM; 2568 2569 if (conn_param->private_data && conn_param->private_data_len) 2570 memcpy((void *) req.private_data + sizeof(struct cma_hdr), 2571 conn_param->private_data, conn_param->private_data_len); 2572 2573 route = &id_priv->id.route; 2574 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); 2575 if (ret) 2576 goto out; 2577 2578 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 2579 id_priv); 2580 if (IS_ERR(id)) { 2581 ret = PTR_ERR(id); 2582 goto out; 2583 } 2584 id_priv->cm_id.ib = id; 2585 2586 req.path = route->path_rec; 2587 req.service_id = cma_get_service_id(id_priv->id.ps, 2588 (struct sockaddr *) &route->addr.dst_addr); 2589 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 2590 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2591 2592 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 2593 if (ret) { 2594 ib_destroy_cm_id(id_priv->cm_id.ib); 2595 id_priv->cm_id.ib = NULL; 2596 } 2597 out: 2598 kfree(req.private_data); 2599 return ret; 2600 } 2601 2602 static int cma_connect_ib(struct rdma_id_private *id_priv, 2603 struct rdma_conn_param *conn_param) 2604 { 2605 struct ib_cm_req_param req; 2606 struct rdma_route *route; 2607 void *private_data; 2608 struct ib_cm_id *id; 2609 int offset, ret; 2610 2611 memset(&req, 0, sizeof req); 2612 offset = cma_user_data_offset(id_priv->id.ps); 2613 req.private_data_len = offset + conn_param->private_data_len; 2614 if (req.private_data_len < conn_param->private_data_len) 2615 return -EINVAL; 2616 2617 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2618 if (!private_data) 2619 return -ENOMEM; 2620 2621 if (conn_param->private_data && conn_param->private_data_len) 2622 memcpy(private_data + offset, conn_param->private_data, 2623 conn_param->private_data_len); 2624 2625 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 2626 if (IS_ERR(id)) { 2627 ret = PTR_ERR(id); 2628 goto out; 2629 } 2630 id_priv->cm_id.ib = id; 2631 2632 route = &id_priv->id.route; 2633 ret = cma_format_hdr(private_data, id_priv->id.ps, route); 2634 if (ret) 2635 goto out; 2636 req.private_data = private_data; 2637 2638 req.primary_path = &route->path_rec[0]; 2639 if (route->num_paths == 2) 2640 req.alternate_path = &route->path_rec[1]; 2641 2642 req.service_id = cma_get_service_id(id_priv->id.ps, 2643 (struct sockaddr *) &route->addr.dst_addr); 2644 req.qp_num = id_priv->qp_num; 2645 req.qp_type = id_priv->id.qp_type; 2646 req.starting_psn = id_priv->seq_num; 2647 req.responder_resources = conn_param->responder_resources; 2648 req.initiator_depth = conn_param->initiator_depth; 2649 req.flow_control = conn_param->flow_control; 2650 req.retry_count = min_t(u8, 7, conn_param->retry_count); 2651 req.rnr_retry_count = min_t(u8, 7, 
conn_param->rnr_retry_count); 2652 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 2653 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 2654 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2655 req.srq = id_priv->srq ? 1 : 0; 2656 2657 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 2658 out: 2659 if (ret && !IS_ERR(id)) { 2660 ib_destroy_cm_id(id); 2661 id_priv->cm_id.ib = NULL; 2662 } 2663 2664 kfree(private_data); 2665 return ret; 2666 } 2667 2668 static int cma_connect_iw(struct rdma_id_private *id_priv, 2669 struct rdma_conn_param *conn_param) 2670 { 2671 struct iw_cm_id *cm_id; 2672 struct sockaddr_in* sin; 2673 int ret; 2674 struct iw_cm_conn_param iw_param; 2675 2676 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 2677 if (IS_ERR(cm_id)) 2678 return PTR_ERR(cm_id); 2679 2680 id_priv->cm_id.iw = cm_id; 2681 2682 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; 2683 cm_id->local_addr = *sin; 2684 2685 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; 2686 cm_id->remote_addr = *sin; 2687 2688 ret = cma_modify_qp_rtr(id_priv, conn_param); 2689 if (ret) 2690 goto out; 2691 2692 if (conn_param) { 2693 iw_param.ord = conn_param->initiator_depth; 2694 iw_param.ird = conn_param->responder_resources; 2695 iw_param.private_data = conn_param->private_data; 2696 iw_param.private_data_len = conn_param->private_data_len; 2697 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 2698 } else { 2699 memset(&iw_param, 0, sizeof iw_param); 2700 iw_param.qpn = id_priv->qp_num; 2701 } 2702 ret = iw_cm_connect(cm_id, &iw_param); 2703 out: 2704 if (ret) { 2705 iw_destroy_cm_id(cm_id); 2706 id_priv->cm_id.iw = NULL; 2707 } 2708 return ret; 2709 } 2710 2711 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2712 { 2713 struct rdma_id_private *id_priv; 2714 int ret; 2715 2716 id_priv = container_of(id, struct rdma_id_private, id); 2717 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 2718 return -EINVAL; 2719 2720 if (!id->qp) { 2721 id_priv->qp_num = conn_param->qp_num; 2722 id_priv->srq = conn_param->srq; 2723 } 2724 2725 switch (rdma_node_get_transport(id->device->node_type)) { 2726 case RDMA_TRANSPORT_IB: 2727 if (id->qp_type == IB_QPT_UD) 2728 ret = cma_resolve_ib_udp(id_priv, conn_param); 2729 else 2730 ret = cma_connect_ib(id_priv, conn_param); 2731 break; 2732 case RDMA_TRANSPORT_IWARP: 2733 ret = cma_connect_iw(id_priv, conn_param); 2734 break; 2735 default: 2736 ret = -ENOSYS; 2737 break; 2738 } 2739 if (ret) 2740 goto err; 2741 2742 return 0; 2743 err: 2744 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 2745 return ret; 2746 } 2747 EXPORT_SYMBOL(rdma_connect); 2748 2749 static int cma_accept_ib(struct rdma_id_private *id_priv, 2750 struct rdma_conn_param *conn_param) 2751 { 2752 struct ib_cm_rep_param rep; 2753 int ret; 2754 2755 ret = cma_modify_qp_rtr(id_priv, conn_param); 2756 if (ret) 2757 goto out; 2758 2759 ret = cma_modify_qp_rts(id_priv, conn_param); 2760 if (ret) 2761 goto out; 2762 2763 memset(&rep, 0, sizeof rep); 2764 rep.qp_num = id_priv->qp_num; 2765 rep.starting_psn = id_priv->seq_num; 2766 rep.private_data = conn_param->private_data; 2767 rep.private_data_len = conn_param->private_data_len; 2768 rep.responder_resources = conn_param->responder_resources; 2769 rep.initiator_depth = conn_param->initiator_depth; 2770 rep.failover_accepted = 0; 2771 rep.flow_control = conn_param->flow_control; 2772 rep.rnr_retry_count = min_t(u8, 7, 
conn_param->rnr_retry_count); 2773 rep.srq = id_priv->srq ? 1 : 0; 2774 2775 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 2776 out: 2777 return ret; 2778 } 2779 2780 static int cma_accept_iw(struct rdma_id_private *id_priv, 2781 struct rdma_conn_param *conn_param) 2782 { 2783 struct iw_cm_conn_param iw_param; 2784 int ret; 2785 2786 ret = cma_modify_qp_rtr(id_priv, conn_param); 2787 if (ret) 2788 return ret; 2789 2790 iw_param.ord = conn_param->initiator_depth; 2791 iw_param.ird = conn_param->responder_resources; 2792 iw_param.private_data = conn_param->private_data; 2793 iw_param.private_data_len = conn_param->private_data_len; 2794 if (id_priv->id.qp) { 2795 iw_param.qpn = id_priv->qp_num; 2796 } else 2797 iw_param.qpn = conn_param->qp_num; 2798 2799 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2800 } 2801 2802 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 2803 enum ib_cm_sidr_status status, 2804 const void *private_data, int private_data_len) 2805 { 2806 struct ib_cm_sidr_rep_param rep; 2807 int ret; 2808 2809 memset(&rep, 0, sizeof rep); 2810 rep.status = status; 2811 if (status == IB_SIDR_SUCCESS) { 2812 ret = cma_set_qkey(id_priv); 2813 if (ret) 2814 return ret; 2815 rep.qp_num = id_priv->qp_num; 2816 rep.qkey = id_priv->qkey; 2817 } 2818 rep.private_data = private_data; 2819 rep.private_data_len = private_data_len; 2820 2821 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 2822 } 2823 2824 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2825 { 2826 struct rdma_id_private *id_priv; 2827 int ret; 2828 2829 id_priv = container_of(id, struct rdma_id_private, id); 2830 2831 id_priv->owner = task_pid_nr(current); 2832 2833 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 2834 return -EINVAL; 2835 2836 if (!id->qp && conn_param) { 2837 id_priv->qp_num = conn_param->qp_num; 2838 id_priv->srq = conn_param->srq; 2839 } 2840 2841 switch (rdma_node_get_transport(id->device->node_type)) { 2842 case RDMA_TRANSPORT_IB: 2843 if (id->qp_type == IB_QPT_UD) { 2844 if (conn_param) 2845 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2846 conn_param->private_data, 2847 conn_param->private_data_len); 2848 else 2849 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2850 NULL, 0); 2851 } else { 2852 if (conn_param) 2853 ret = cma_accept_ib(id_priv, conn_param); 2854 else 2855 ret = cma_rep_recv(id_priv); 2856 } 2857 break; 2858 case RDMA_TRANSPORT_IWARP: 2859 ret = cma_accept_iw(id_priv, conn_param); 2860 break; 2861 default: 2862 ret = -ENOSYS; 2863 break; 2864 } 2865 2866 if (ret) 2867 goto reject; 2868 2869 return 0; 2870 reject: 2871 cma_modify_qp_err(id_priv); 2872 rdma_reject(id, NULL, 0); 2873 return ret; 2874 } 2875 EXPORT_SYMBOL(rdma_accept); 2876 2877 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 2878 { 2879 struct rdma_id_private *id_priv; 2880 int ret; 2881 2882 id_priv = container_of(id, struct rdma_id_private, id); 2883 if (!id_priv->cm_id.ib) 2884 return -EINVAL; 2885 2886 switch (id->device->node_type) { 2887 case RDMA_NODE_IB_CA: 2888 ret = ib_cm_notify(id_priv->cm_id.ib, event); 2889 break; 2890 default: 2891 ret = 0; 2892 break; 2893 } 2894 return ret; 2895 } 2896 EXPORT_SYMBOL(rdma_notify); 2897 2898 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2899 u8 private_data_len) 2900 { 2901 struct rdma_id_private *id_priv; 2902 int ret; 2903 2904 id_priv = container_of(id, struct rdma_id_private, id); 2905 if (!id_priv->cm_id.ib) 2906 return -EINVAL; 2907 2908 switch 
(rdma_node_get_transport(id->device->node_type)) { 2909 case RDMA_TRANSPORT_IB: 2910 if (id->qp_type == IB_QPT_UD) 2911 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2912 private_data, private_data_len); 2913 else 2914 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2915 IB_CM_REJ_CONSUMER_DEFINED, NULL, 2916 0, private_data, private_data_len); 2917 break; 2918 case RDMA_TRANSPORT_IWARP: 2919 ret = iw_cm_reject(id_priv->cm_id.iw, 2920 private_data, private_data_len); 2921 break; 2922 default: 2923 ret = -ENOSYS; 2924 break; 2925 } 2926 return ret; 2927 } 2928 EXPORT_SYMBOL(rdma_reject); 2929 2930 int rdma_disconnect(struct rdma_cm_id *id) 2931 { 2932 struct rdma_id_private *id_priv; 2933 int ret; 2934 2935 id_priv = container_of(id, struct rdma_id_private, id); 2936 if (!id_priv->cm_id.ib) 2937 return -EINVAL; 2938 2939 switch (rdma_node_get_transport(id->device->node_type)) { 2940 case RDMA_TRANSPORT_IB: 2941 ret = cma_modify_qp_err(id_priv); 2942 if (ret) 2943 goto out; 2944 /* Initiate or respond to a disconnect. */ 2945 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 2946 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 2947 break; 2948 case RDMA_TRANSPORT_IWARP: 2949 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 2950 break; 2951 default: 2952 ret = -EINVAL; 2953 break; 2954 } 2955 out: 2956 return ret; 2957 } 2958 EXPORT_SYMBOL(rdma_disconnect); 2959 2960 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 2961 { 2962 struct rdma_id_private *id_priv; 2963 struct cma_multicast *mc = multicast->context; 2964 struct rdma_cm_event event; 2965 int ret; 2966 2967 id_priv = mc->id_priv; 2968 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && 2969 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) 2970 return 0; 2971 2972 mutex_lock(&id_priv->qp_mutex); 2973 if (!status && id_priv->id.qp) 2974 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 2975 be16_to_cpu(multicast->rec.mlid)); 2976 mutex_unlock(&id_priv->qp_mutex); 2977 2978 memset(&event, 0, sizeof event); 2979 event.status = status; 2980 event.param.ud.private_data = mc->context; 2981 if (!status) { 2982 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 2983 ib_init_ah_from_mcmember(id_priv->id.device, 2984 id_priv->id.port_num, &multicast->rec, 2985 &event.param.ud.ah_attr); 2986 event.param.ud.qp_num = 0xFFFFFF; 2987 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 2988 } else 2989 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 2990 2991 ret = id_priv->id.event_handler(&id_priv->id, &event); 2992 if (ret) { 2993 cma_exch(id_priv, RDMA_CM_DESTROYING); 2994 mutex_unlock(&id_priv->handler_mutex); 2995 rdma_destroy_id(&id_priv->id); 2996 return 0; 2997 } 2998 2999 mutex_unlock(&id_priv->handler_mutex); 3000 return 0; 3001 } 3002 3003 static void cma_set_mgid(struct rdma_id_private *id_priv, 3004 struct sockaddr *addr, union ib_gid *mgid) 3005 { 3006 unsigned char mc_map[MAX_ADDR_LEN]; 3007 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3008 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3009 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3010 3011 if (cma_any_addr(addr)) { 3012 memset(mgid, 0, sizeof *mgid); 3013 } else if ((addr->sa_family == AF_INET6) && 3014 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3015 0xFF10A01B)) { 3016 /* IPv6 address is an SA assigned MGID. 
*/ 3017 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3018 } else if ((addr->sa_family == AF_INET6)) { 3019 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3020 if (id_priv->id.ps == RDMA_PS_UDP) 3021 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3022 *mgid = *(union ib_gid *) (mc_map + 4); 3023 } else { 3024 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3025 if (id_priv->id.ps == RDMA_PS_UDP) 3026 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3027 *mgid = *(union ib_gid *) (mc_map + 4); 3028 } 3029 } 3030 3031 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3032 struct cma_multicast *mc) 3033 { 3034 struct ib_sa_mcmember_rec rec; 3035 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3036 ib_sa_comp_mask comp_mask; 3037 int ret; 3038 3039 ib_addr_get_mgid(dev_addr, &rec.mgid); 3040 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3041 &rec.mgid, &rec); 3042 if (ret) 3043 return ret; 3044 3045 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3046 if (id_priv->id.ps == RDMA_PS_UDP) 3047 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3048 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3049 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3050 rec.join_state = 1; 3051 3052 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 3053 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 3054 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 3055 IB_SA_MCMEMBER_REC_FLOW_LABEL | 3056 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 3057 3058 if (id_priv->id.ps == RDMA_PS_IPOIB) 3059 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 3060 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 3061 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 3062 IB_SA_MCMEMBER_REC_MTU | 3063 IB_SA_MCMEMBER_REC_HOP_LIMIT; 3064 3065 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 3066 id_priv->id.port_num, &rec, 3067 comp_mask, GFP_KERNEL, 3068 cma_ib_mc_handler, mc); 3069 return PTR_RET(mc->multicast.ib); 3070 } 3071 3072 static void iboe_mcast_work_handler(struct work_struct *work) 3073 { 3074 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); 3075 struct cma_multicast *mc = mw->mc; 3076 struct ib_sa_multicast *m = mc->multicast.ib; 3077 3078 mc->multicast.ib->context = mc; 3079 cma_ib_mc_handler(0, m); 3080 kref_put(&mc->mcref, release_mc); 3081 kfree(mw); 3082 } 3083 3084 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid) 3085 { 3086 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 3087 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 3088 3089 if (cma_any_addr(addr)) { 3090 memset(mgid, 0, sizeof *mgid); 3091 } else if (addr->sa_family == AF_INET6) { 3092 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3093 } else { 3094 mgid->raw[0] = 0xff; 3095 mgid->raw[1] = 0x0e; 3096 mgid->raw[2] = 0; 3097 mgid->raw[3] = 0; 3098 mgid->raw[4] = 0; 3099 mgid->raw[5] = 0; 3100 mgid->raw[6] = 0; 3101 mgid->raw[7] = 0; 3102 mgid->raw[8] = 0; 3103 mgid->raw[9] = 0; 3104 mgid->raw[10] = 0xff; 3105 mgid->raw[11] = 0xff; 3106 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 3107 } 3108 } 3109 3110 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 3111 struct cma_multicast *mc) 3112 { 3113 struct iboe_mcast_work *work; 3114 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3115 int err; 3116 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 3117 struct net_device *ndev = NULL; 3118 3119 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 3120 return 
-EINVAL; 3121 3122 work = kzalloc(sizeof *work, GFP_KERNEL); 3123 if (!work) 3124 return -ENOMEM; 3125 3126 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); 3127 if (!mc->multicast.ib) { 3128 err = -ENOMEM; 3129 goto out1; 3130 } 3131 3132 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid); 3133 3134 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); 3135 if (id_priv->id.ps == RDMA_PS_UDP) 3136 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3137 3138 if (dev_addr->bound_dev_if) 3139 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3140 if (!ndev) { 3141 err = -ENODEV; 3142 goto out2; 3143 } 3144 mc->multicast.ib->rec.rate = iboe_get_rate(ndev); 3145 mc->multicast.ib->rec.hop_limit = 1; 3146 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); 3147 dev_put(ndev); 3148 if (!mc->multicast.ib->rec.mtu) { 3149 err = -EINVAL; 3150 goto out2; 3151 } 3152 iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid); 3153 work->id = id_priv; 3154 work->mc = mc; 3155 INIT_WORK(&work->work, iboe_mcast_work_handler); 3156 kref_get(&mc->mcref); 3157 queue_work(cma_wq, &work->work); 3158 3159 return 0; 3160 3161 out2: 3162 kfree(mc->multicast.ib); 3163 out1: 3164 kfree(work); 3165 return err; 3166 } 3167 3168 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 3169 void *context) 3170 { 3171 struct rdma_id_private *id_priv; 3172 struct cma_multicast *mc; 3173 int ret; 3174 3175 id_priv = container_of(id, struct rdma_id_private, id); 3176 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 3177 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 3178 return -EINVAL; 3179 3180 mc = kmalloc(sizeof *mc, GFP_KERNEL); 3181 if (!mc) 3182 return -ENOMEM; 3183 3184 memcpy(&mc->addr, addr, ip_addr_size(addr)); 3185 mc->context = context; 3186 mc->id_priv = id_priv; 3187 3188 spin_lock(&id_priv->lock); 3189 list_add(&mc->list, &id_priv->mc_list); 3190 spin_unlock(&id_priv->lock); 3191 3192 switch (rdma_node_get_transport(id->device->node_type)) { 3193 case RDMA_TRANSPORT_IB: 3194 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3195 case IB_LINK_LAYER_INFINIBAND: 3196 ret = cma_join_ib_multicast(id_priv, mc); 3197 break; 3198 case IB_LINK_LAYER_ETHERNET: 3199 kref_init(&mc->mcref); 3200 ret = cma_iboe_join_multicast(id_priv, mc); 3201 break; 3202 default: 3203 ret = -EINVAL; 3204 } 3205 break; 3206 default: 3207 ret = -ENOSYS; 3208 break; 3209 } 3210 3211 if (ret) { 3212 spin_lock_irq(&id_priv->lock); 3213 list_del(&mc->list); 3214 spin_unlock_irq(&id_priv->lock); 3215 kfree(mc); 3216 } 3217 return ret; 3218 } 3219 EXPORT_SYMBOL(rdma_join_multicast); 3220 3221 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 3222 { 3223 struct rdma_id_private *id_priv; 3224 struct cma_multicast *mc; 3225 3226 id_priv = container_of(id, struct rdma_id_private, id); 3227 spin_lock_irq(&id_priv->lock); 3228 list_for_each_entry(mc, &id_priv->mc_list, list) { 3229 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) { 3230 list_del(&mc->list); 3231 spin_unlock_irq(&id_priv->lock); 3232 3233 if (id->qp) 3234 ib_detach_mcast(id->qp, 3235 &mc->multicast.ib->rec.mgid, 3236 be16_to_cpu(mc->multicast.ib->rec.mlid)); 3237 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { 3238 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3239 case IB_LINK_LAYER_INFINIBAND: 3240 ib_sa_free_multicast(mc->multicast.ib); 3241 kfree(mc); 3242 break; 3243 case IB_LINK_LAYER_ETHERNET: 3244 kref_put(&mc->mcref, release_mc); 3245 
break; 3246 default: 3247 break; 3248 } 3249 } 3250 return; 3251 } 3252 } 3253 spin_unlock_irq(&id_priv->lock); 3254 } 3255 EXPORT_SYMBOL(rdma_leave_multicast); 3256 3257 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 3258 { 3259 struct rdma_dev_addr *dev_addr; 3260 struct cma_ndev_work *work; 3261 3262 dev_addr = &id_priv->id.route.addr.dev_addr; 3263 3264 if ((dev_addr->bound_dev_if == ndev->ifindex) && 3265 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 3266 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 3267 ndev->name, &id_priv->id); 3268 work = kzalloc(sizeof *work, GFP_KERNEL); 3269 if (!work) 3270 return -ENOMEM; 3271 3272 INIT_WORK(&work->work, cma_ndev_work_handler); 3273 work->id = id_priv; 3274 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 3275 atomic_inc(&id_priv->refcount); 3276 queue_work(cma_wq, &work->work); 3277 } 3278 3279 return 0; 3280 } 3281 3282 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 3283 void *ctx) 3284 { 3285 struct net_device *ndev = (struct net_device *)ctx; 3286 struct cma_device *cma_dev; 3287 struct rdma_id_private *id_priv; 3288 int ret = NOTIFY_DONE; 3289 3290 if (dev_net(ndev) != &init_net) 3291 return NOTIFY_DONE; 3292 3293 if (event != NETDEV_BONDING_FAILOVER) 3294 return NOTIFY_DONE; 3295 3296 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) 3297 return NOTIFY_DONE; 3298 3299 mutex_lock(&lock); 3300 list_for_each_entry(cma_dev, &dev_list, list) 3301 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 3302 ret = cma_netdev_change(ndev, id_priv); 3303 if (ret) 3304 goto out; 3305 } 3306 3307 out: 3308 mutex_unlock(&lock); 3309 return ret; 3310 } 3311 3312 static struct notifier_block cma_nb = { 3313 .notifier_call = cma_netdev_callback 3314 }; 3315 3316 static void cma_add_one(struct ib_device *device) 3317 { 3318 struct cma_device *cma_dev; 3319 struct rdma_id_private *id_priv; 3320 3321 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 3322 if (!cma_dev) 3323 return; 3324 3325 cma_dev->device = device; 3326 3327 init_completion(&cma_dev->comp); 3328 atomic_set(&cma_dev->refcount, 1); 3329 INIT_LIST_HEAD(&cma_dev->id_list); 3330 ib_set_client_data(device, &cma_client, cma_dev); 3331 3332 mutex_lock(&lock); 3333 list_add_tail(&cma_dev->list, &dev_list); 3334 list_for_each_entry(id_priv, &listen_any_list, list) 3335 cma_listen_on_dev(id_priv, cma_dev); 3336 mutex_unlock(&lock); 3337 } 3338 3339 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 3340 { 3341 struct rdma_cm_event event; 3342 enum rdma_cm_state state; 3343 int ret = 0; 3344 3345 /* Record that we want to remove the device */ 3346 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); 3347 if (state == RDMA_CM_DESTROYING) 3348 return 0; 3349 3350 cma_cancel_operation(id_priv, state); 3351 mutex_lock(&id_priv->handler_mutex); 3352 3353 /* Check for destruction from another callback. 
*/ 3354 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) 3355 goto out; 3356 3357 memset(&event, 0, sizeof event); 3358 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 3359 ret = id_priv->id.event_handler(&id_priv->id, &event); 3360 out: 3361 mutex_unlock(&id_priv->handler_mutex); 3362 return ret; 3363 } 3364 3365 static void cma_process_remove(struct cma_device *cma_dev) 3366 { 3367 struct rdma_id_private *id_priv; 3368 int ret; 3369 3370 mutex_lock(&lock); 3371 while (!list_empty(&cma_dev->id_list)) { 3372 id_priv = list_entry(cma_dev->id_list.next, 3373 struct rdma_id_private, list); 3374 3375 list_del(&id_priv->listen_list); 3376 list_del_init(&id_priv->list); 3377 atomic_inc(&id_priv->refcount); 3378 mutex_unlock(&lock); 3379 3380 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 3381 cma_deref_id(id_priv); 3382 if (ret) 3383 rdma_destroy_id(&id_priv->id); 3384 3385 mutex_lock(&lock); 3386 } 3387 mutex_unlock(&lock); 3388 3389 cma_deref_dev(cma_dev); 3390 wait_for_completion(&cma_dev->comp); 3391 } 3392 3393 static void cma_remove_one(struct ib_device *device) 3394 { 3395 struct cma_device *cma_dev; 3396 3397 cma_dev = ib_get_client_data(device, &cma_client); 3398 if (!cma_dev) 3399 return; 3400 3401 mutex_lock(&lock); 3402 list_del(&cma_dev->list); 3403 mutex_unlock(&lock); 3404 3405 cma_process_remove(cma_dev); 3406 kfree(cma_dev); 3407 } 3408 3409 static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) 3410 { 3411 struct nlmsghdr *nlh; 3412 struct rdma_cm_id_stats *id_stats; 3413 struct rdma_id_private *id_priv; 3414 struct rdma_cm_id *id = NULL; 3415 struct cma_device *cma_dev; 3416 int i_dev = 0, i_id = 0; 3417 3418 /* 3419 * We export all of the IDs as a sequence of messages. Each 3420 * ID gets its own netlink message. 
3421 */ 3422 mutex_lock(&lock); 3423 3424 list_for_each_entry(cma_dev, &dev_list, list) { 3425 if (i_dev < cb->args[0]) { 3426 i_dev++; 3427 continue; 3428 } 3429 3430 i_id = 0; 3431 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 3432 if (i_id < cb->args[1]) { 3433 i_id++; 3434 continue; 3435 } 3436 3437 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, 3438 sizeof *id_stats, RDMA_NL_RDMA_CM, 3439 RDMA_NL_RDMA_CM_ID_STATS); 3440 if (!id_stats) 3441 goto out; 3442 3443 memset(id_stats, 0, sizeof *id_stats); 3444 id = &id_priv->id; 3445 id_stats->node_type = id->route.addr.dev_addr.dev_type; 3446 id_stats->port_num = id->port_num; 3447 id_stats->bound_dev_if = 3448 id->route.addr.dev_addr.bound_dev_if; 3449 3450 if (id->route.addr.src_addr.ss_family == AF_INET) { 3451 if (ibnl_put_attr(skb, nlh, 3452 sizeof(struct sockaddr_in), 3453 &id->route.addr.src_addr, 3454 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { 3455 goto out; 3456 } 3457 if (ibnl_put_attr(skb, nlh, 3458 sizeof(struct sockaddr_in), 3459 &id->route.addr.dst_addr, 3460 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { 3461 goto out; 3462 } 3463 } else if (id->route.addr.src_addr.ss_family == AF_INET6) { 3464 if (ibnl_put_attr(skb, nlh, 3465 sizeof(struct sockaddr_in6), 3466 &id->route.addr.src_addr, 3467 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { 3468 goto out; 3469 } 3470 if (ibnl_put_attr(skb, nlh, 3471 sizeof(struct sockaddr_in6), 3472 &id->route.addr.dst_addr, 3473 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { 3474 goto out; 3475 } 3476 } 3477 3478 id_stats->pid = id_priv->owner; 3479 id_stats->port_space = id->ps; 3480 id_stats->cm_state = id_priv->state; 3481 id_stats->qp_num = id_priv->qp_num; 3482 id_stats->qp_type = id->qp_type; 3483 3484 i_id++; 3485 } 3486 3487 cb->args[1] = 0; 3488 i_dev++; 3489 } 3490 3491 out: 3492 mutex_unlock(&lock); 3493 cb->args[0] = i_dev; 3494 cb->args[1] = i_id; 3495 3496 return skb->len; 3497 } 3498 3499 static const struct ibnl_client_cbs cma_cb_table[] = { 3500 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, 3501 .module = THIS_MODULE }, 3502 }; 3503 3504 static int __init cma_init(void) 3505 { 3506 int ret; 3507 3508 cma_wq = create_singlethread_workqueue("rdma_cm"); 3509 if (!cma_wq) 3510 return -ENOMEM; 3511 3512 ib_sa_register_client(&sa_client); 3513 rdma_addr_register_client(&addr_client); 3514 register_netdevice_notifier(&cma_nb); 3515 3516 ret = ib_register_client(&cma_client); 3517 if (ret) 3518 goto err; 3519 3520 if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table)) 3521 printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n"); 3522 3523 return 0; 3524 3525 err: 3526 unregister_netdevice_notifier(&cma_nb); 3527 rdma_addr_unregister_client(&addr_client); 3528 ib_sa_unregister_client(&sa_client); 3529 destroy_workqueue(cma_wq); 3530 return ret; 3531 } 3532 3533 static void __exit cma_cleanup(void) 3534 { 3535 ibnl_remove_client(RDMA_NL_RDMA_CM); 3536 ib_unregister_client(&cma_client); 3537 unregister_netdevice_notifier(&cma_nb); 3538 rdma_addr_unregister_client(&addr_client); 3539 ib_sa_unregister_client(&sa_client); 3540 destroy_workqueue(cma_wq); 3541 idr_destroy(&sdp_ps); 3542 idr_destroy(&tcp_ps); 3543 idr_destroy(&udp_ps); 3544 idr_destroy(&ipoib_ps); 3545 idr_destroy(&ib_ps); 3546 } 3547 3548 module_init(cma_init); 3549 module_exit(cma_cleanup); 3550
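/*
 * Illustrative sketch only (not part of this module and excluded from the
 * build): how a kernel ULP might drive the active side of the state machine
 * exported above, i.e. rdma_resolve_addr() -> rdma_resolve_route() ->
 * rdma_connect().  All "example_" names and the timeout/connection
 * parameters are hypothetical; QP setup and error handling are reduced to
 * comments.
 */
#if 0
static int example_active_handler(struct rdma_cm_id *id,
                                  struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                /* Address resolved by addr_handler(); resolve a route next. */
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* A real ULP would allocate its PD/CQ and call
                 * rdma_create_qp(id, pd, &init_attr) before connecting. */
                memset(&conn_param, 0, sizeof conn_param);
                conn_param.responder_resources = 1;
                conn_param.initiator_depth = 1;
                conn_param.retry_count = 7;
                conn_param.rnr_retry_count = 7;
                return rdma_connect(id, &conn_param);
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Connection is up; the ULP may now post on id->qp. */
                return 0;
        default:
                /* Treat anything else as fatal.  Returning non-zero makes
                 * the CMA destroy this id, exactly as the handlers above
                 * (e.g. cma_sidr_rep_handler()) document. */
                return -ECONNABORTED;
        }
}

static int example_connect(struct sockaddr *dst)
{
        struct rdma_cm_id *id;

        id = rdma_create_id(example_active_handler, NULL, RDMA_PS_TCP,
                            IB_QPT_RC);
        if (IS_ERR(id))
                return PTR_ERR(id);

        /* Kicks off cma_bind_addr()/addr_handler() asynchronously;
         * progress is reported through example_active_handler(). */
        return rdma_resolve_addr(id, NULL, dst, 2000);
}
#endif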
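/*
 * Illustrative sketch only (not compiled): the matching passive side, using
 * rdma_bind_addr()/rdma_listen() and accepting or rejecting connect requests
 * from the event handler.  The listening id's handler is inherited by each
 * child id created for an incoming request.  "example_" names and the port
 * number are hypothetical.
 */
#if 0
static int example_listen_handler(struct rdma_cm_id *id,
                                  struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                /* "id" is a new child of the listening id; the initiator's
                 * parameters arrive in event->param.conn.  A real ULP would
                 * call rdma_create_qp() on the child id before accepting. */
                memset(&conn_param, 0, sizeof conn_param);
                conn_param.responder_resources =
                        event->param.conn.initiator_depth;
                conn_param.initiator_depth =
                        event->param.conn.responder_resources;
                if (rdma_accept(id, &conn_param))
                        rdma_reject(id, NULL, 0);
                return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
                rdma_disconnect(id);
                /* Non-zero return asks the CMA to destroy this child id. */
                return 1;
        default:
                return 0;
        }
}

static int example_listen(void)
{
        struct sockaddr_in sin = {
                .sin_family      = AF_INET,
                .sin_port        = htons(12345),
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(example_listen_handler, NULL, RDMA_PS_TCP,
                            IB_QPT_RC);
        if (IS_ERR(id))
                return PTR_ERR(id);

        ret = rdma_bind_addr(id, (struct sockaddr *) &sin);
        if (!ret)
                ret = rdma_listen(id, 16);
        if (ret)
                rdma_destroy_id(id);
        return ret;
}
#endif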
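/*
 * Illustrative sketch only (not compiled): joining and leaving a UD
 * multicast group with rdma_join_multicast()/rdma_leave_multicast().  As
 * the state check in rdma_join_multicast() above requires, the id must
 * already be bound or address-resolved.  "example_" names are hypothetical.
 */
#if 0
static int example_mc_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) {
                /* The context passed to rdma_join_multicast() comes back in
                 * event->param.ud.private_data; the ah_attr, the multicast
                 * QPN (0xFFFFFF) and the qkey are also in param.ud. */
                return 0;
        }
        /* RDMA_CM_EVENT_MULTICAST_ERROR and others: clean up as needed. */
        return 0;
}

static int example_join_leave(struct rdma_cm_id *id,
                              struct sockaddr *mc_addr, void *ctx)
{
        int ret;

        /* "id" is assumed to be an RDMA_PS_UDP/IB_QPT_UD id whose handler
         * is example_mc_handler() and which has already been bound. */
        ret = rdma_join_multicast(id, mc_addr, ctx);
        if (ret)
                return ret;

        /* ... post UD sends to the group once the join event arrives ... */

        rdma_leave_multicast(id, mc_addr);
        return 0;
}
#endif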