/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};
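
/*
 * Every rdma_cm_id bound to the same port within a port space is chained
 * on one rdma_bind_list through the owners hlist.
 */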
struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}
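
/*
 * The ip_version fields pack two values into a single byte: the high nibble
 * carries the version and the low nibble is preserved.  For example,
 * cma_set_ip_ver() on a zeroed header with ip_ver = 4 stores 0x40, and
 * cma_get_ip_ver() then returns 4.
 */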
static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
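	/* Used as the starting receive PSN; see rdma_init_qp_attr(). */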
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
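	/* The QP must pass through INIT before it may transition to RTR. */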
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
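		/* IPv6: the wildcard address is all zeros, so OR the four words. */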
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
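	/* Only SDP replies carry a header to validate; other port spaces
	 * accept the reply as-is. */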
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
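		/* The IB CM frees cm_id itself on a non-zero return, so clear
		 * our pointer to avoid a double destroy in rdma_destroy_id(). */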
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto destroy_id;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
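	/* Runs on the listening id: build a child id for the incoming request
	 * and hand it to the user's event handler. */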
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

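	/* Record the connection addresses reported by the iWARP CM. */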
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
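	/* The internal listen holds a reference on its parent id; see the
	 * internal_id handling in rdma_destroy_id(). */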
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_storage addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.ss_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

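	/* comp_mask tells the SA which path record fields this query matches on. */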
	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
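	/* Caller holds the global lock; link this id into the port's bind list. */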
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization, as in inet_csk_get_port() */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
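	 * For example, a wildcard bind to port 4711 fails once 10.0.0.1:4711
	 * is taken.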
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
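/*
 * Passive-side sketch for the binding path above ('sin', 'listen_id'
 * and the port number are hypothetical caller state, not part of
 * this file):
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(7174),
 *		.sin_addr.s_addr = INADDR_ANY,
 *	};
 *
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *) &sin);
 *	if (!ret)
 *		ret = rdma_listen(listen_id, 16);
 *
 * Binding to INADDR_ANY skips device acquisition here; the id is
 * matched to a device only when a request arrives or an address is
 * resolved.
 */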
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}
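/*
 * The header built above travels in the first bytes of the IB CM
 * private data; the caller's payload follows it.  For the non-SDP
 * port spaces the layout on the wire is therefore:
 *
 *	offset 0			struct cma_hdr (version, ip_ver,
 *					port, src/dst addresses)
 *	offset sizeof(struct cma_hdr)	user private_data, if any
 *
 * cma_user_data_offset() yields the same offset on the send side,
 * and the listen path is expected to strip the header before handing
 * the payload to the consumer.
 */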
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
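/*
 * Note on the SIDR timeout above: IB CM response timeouts are encoded
 * as an exponent n meaning 4.096us * 2^n.  With CMA_CM_RESPONSE_TIMEOUT
 * of 20 that is roughly 4.3 seconds, so shifting the same exponent
 * down by 8 converts it to milliseconds: 1 << (20 - 8) = 4096ms,
 * matching the encoded CM timeout to within rounding.
 */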
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
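/*
 * Active-side sketch for the connect path above, continuing from a
 * RDMA_CM_EVENT_ROUTE_RESOLVED callback (the conn_param values are
 * illustrative, not mandated by this file):
 *
 *	struct rdma_conn_param conn_param = {
 *		.responder_resources = 1,
 *		.initiator_depth = 1,
 *		.retry_count = 7,
 *		.rnr_retry_count = 7,
 *	};
 *
 *	ret = rdma_connect(id, &conn_param);
 *
 * For RC port spaces this issues an IB CM REQ (or an iWARP connect);
 * for UD port spaces the same call sends a SIDR REQ instead, and the
 * id completes with RDMA_CM_EVENT_ESTABLISHED carrying the remote
 * QPN and Q_Key.
 */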
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
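/*
 * Passive-side sketch for the accept path above, from inside a
 * listener's RDMA_CM_EVENT_CONNECT_REQUEST callback (QP setup is
 * elided; the handler name is hypothetical):
 *
 *	static int my_listen_handler(struct rdma_cm_id *id,
 *				     struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param conn_param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *		};
 *
 *		if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return rdma_accept(id, &conn_param);
 *		return 0;
 *	}
 *
 * A NULL conn_param on an RC id does not send a REP; it replies to a
 * previously received REP via cma_rep_recv() instead.
 */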
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
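/*
 * Typical teardown order built on the call above (a sketch; either
 * side may initiate):
 *
 *	rdma_disconnect(id);	moves the QP to error, sends DREQ/DREP
 *	rdma_destroy_qp(id);	after RDMA_CM_EVENT_DISCONNECTED arrives
 *	rdma_destroy_id(id);	once no callbacks can still be running
 *
 * Note that on the IB path a failed DREQ is answered with a DREP, so
 * a disconnect raced from both ends still completes.
 */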
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
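/*
 * Multicast usage sketch for the two exported calls above (mcast_addr
 * is hypothetical caller state; the id must already be bound or have
 * its address resolved):
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &mcast_addr, NULL);
 *
 * On RDMA_CM_EVENT_MULTICAST_JOIN the event's param.ud carries the
 * ah_attr, qp_num (0xFFFFFF for multicast) and qkey needed to post
 * sends to the group.  The join is undone with:
 *
 *	rdma_leave_multicast(id, (struct sockaddr *) &mcast_addr);
 *
 * which also detaches any attached QP before freeing the join.
 */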
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->src_dev == ndev) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *) ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
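/*
 * The removal loop below has to drop 'lock' around each notification:
 * RDMA_CM_EVENT_DEVICE_REMOVAL handlers may call back into this file
 * (e.g. rdma_destroy_id()), which takes 'lock' itself.  Each id is
 * pinned with a reference across the unlocked window, and the device
 * reference dropped via cma_deref_dev() plus the completion wait
 * guarantee the cma_device is not freed while any id still points at
 * it.
 */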
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);