/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc.  All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]	= "reset by remote host",
	[ECONNREFUSED]	= "refused by remote application",
	[ETIMEDOUT]	= "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
	size_t index;

	/* iWARP uses negative errnos */
	index = -reason;

	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
	    iwcm_rej_reason_strs[index])
		return iwcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

static struct workqueue_struct *iwcm_wq;
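/*
 * A deferred provider event.  One iwcm_work carries a single event
 * from the provider's (possibly interrupt-context) upcall to the
 * iwcm_wq thread:
 *	work      - work_struct queued on iwcm_wq
 *	cm_id     - owning iwcm_id_private
 *	list      - entry on the cm_id's pending work_list
 *	event     - local copy of the provider's iw_cm_event
 *	free_list - entry on the cm_id's pre-allocated free pool
 */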
struct iwcm_work {
	struct work_struct	work;
	struct iwcm_id_private	*cm_id;
	struct list_head	list;
	struct iw_cm_event	event;
	struct list_head	free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's
 *    up to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

/* Take a pre-allocated work element off the cm_id's free list */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

/* Return a work element to the cm_id's free list */
static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}
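/*
 * Usage sketch for the pool above (illustration only; the real callers
 * are cm_event_handler() and cm_work_handler() below, and both hold
 * cm_id_priv->lock around these calls):
 *
 *	spin_lock_irqsave(&cm_id_priv->lock, flags);
 *	work = get_work(cm_id_priv);	  take from work_free_list
 *	...
 *	put_work(work);			  return to work_free_list
 *	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 */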
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.  GFP_ATOMIC is used
 * because this runs in the provider's event upcall, which may be
 * in interrupt context.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id.  If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state.  It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
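/*
 * iw_cm_disconnect() below maps its 'abrupt' argument onto these two
 * helpers: a nonzero 'abrupt' moves the QP straight to ERR, while an
 * orderly disconnect moves it to SQD so outstanding send work can
 * drain first.
 */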
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed.
 * Then process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states, so ignore the
		 * modify_qp return code.
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
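/*
 * A hedged sketch of an orderly shutdown as a consumer might drive it
 * (ULP specifics omitted):
 *
 *	iw_cm_disconnect(cm_id, 0);	  moves the QP to SQD
 *	  ... wait for IW_CM_EVENT_CLOSE via the cm_handler ...
 *	iw_destroy_cm_id(cm_id);	  must not be called from the
 *					  event handler itself
 */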
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall.  A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned nonzero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread.  The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
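/*
 * Example of the wildcard substitution performed by
 * iw_cm_check_wildcard() below: if the port mapper handed back
 * 0.0.0.0 (or :: for IPv6), the IP portion of cm_outaddr is
 * overwritten with the real address from cm_addr while the port is
 * left untouched.
 */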
/**
 * iw_cm_check_wildcard - If the IP address is a wildcard, use the original
 * @pm_addr: sockaddr containing the IP to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr in which to set the IP address, leaving the
 *		port unchanged
 *
 * Checks pm_addr for the wildcard and, if found, sets cm_outaddr's
 * IP to the actual one (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4_addr =
				(struct sockaddr_in *)cm_addr;
			struct sockaddr_in *cm4_outaddr =
				(struct sockaddr_in *)cm_outaddr;

			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
		}
	} else {
		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
			struct sockaddr_in6 *cm6_addr =
				(struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *cm6_outaddr =
				(struct sockaddr_in6 *)cm_outaddr;

			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
		}
	}
}

/**
 * iw_cm_map - Use the portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 *
 * Tries to add a mapping for a port using the portmapper.  If
 * successful in mapping the IP/port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 *
 * Returns nonzero only if iwpm_create_mapinfo() fails.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int status;

	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	memcpy(pm_reg_msg.dev_name, cm_id->device->name,
	       sizeof(pm_reg_msg.dev_name));
	memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
	       sizeof(pm_reg_msg.if_name));

	/* Without a usable port mapper, fall back to the actual addresses */
	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg,
						    RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM);
}
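/*
 * Invariant maintained by iw_cm_map() above: local_addr/remote_addr
 * always hold the addresses the application asked for, while
 * m_local_addr/m_remote_addr hold the port-mapper-translated
 * addresses actually used on the wire.  When no port mapper is
 * available, the two pairs are identical.
 */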
/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests.  Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request.  No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
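/*
 * A hedged sketch of a passive-side client event handler (the ULP
 * names here are hypothetical, not part of this file):
 *
 *	static int ulp_cm_handler(struct iw_cm_id *cm_id,
 *				  struct iw_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case IW_CM_EVENT_CONNECT_REQUEST:
 *			if (ulp_can_take_conn(cm_id, event))
 *				return iw_cm_accept(cm_id, &conn_param);
 *			return iw_cm_reject(cm_id, NULL, 0);
 *		...
 *		}
 *	}
 */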
/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event.  Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event.  iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->qp) {
		cm_id->device->iwcm->rem_ref(qp);
		cm_id_priv->qp = NULL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
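/*
 * A hedged sketch of the active-side sequence (placeholders for the
 * ULP's own values; not a complete example):
 *
 *	cm_id = iw_create_cm_id(device, ulp_cm_handler, ctx);
 *	cm_id->local_addr = <source IP/port>;
 *	cm_id->remote_addr = <destination IP/port>;
 *	ret = iw_cm_connect(cm_id, &conn_param);
 *	  ... the outcome arrives later as IW_CM_EVENT_CONNECT_REPLY ...
 */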
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request.  The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event.  The
 * listen_cm_id contains the client cm_handler, context and
 * device.  These are copied when the child cm_id is cloned.  The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id.  If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}
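/*
 * Note on the return value handling above: if the client's cm_handler
 * returns nonzero for a connection request, the core rejects the
 * connection and destroys the newly created child cm_id, leaving the
 * listening parent untouched.
 */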
/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state.  If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event.  If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect.  Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server.  This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
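/*
 * DISCONNECT only moves an ESTABLISHED connection to CLOSING; the
 * actual teardown is deferred to the CLOSE event, handled below.  The
 * pre-allocation comment near the top of this file lists which events
 * each type of cm_id can expect.
 */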
/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state.  Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id.  If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed.  This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * This function is called in interrupt context.  Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list.  If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id.  Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
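/*
 * A hedged sketch of the intended calling pattern for
 * iw_cm_init_qp_attr() above (consumer side, error handling omitted):
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	if (!iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask))
 *		ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */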
static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		pr_err("iw_cm: couldn't init iwpm\n");
	else
		rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
	if (!iwcm_wq)
		return -ENOMEM;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		destroy_workqueue(iwcm_wq);
		return -ENOMEM;
	}

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	rdma_nl_unregister(RDMA_NL_IWCM);
	iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);