/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
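
/*
 * Illustrative summary (not compiled): the sizing rule above is applied
 * by real calls later in this file, namely
 *
 *	iw_cm_listen():		alloc_work_entries(cm_id_priv, backlog)
 *	iw_cm_connect():	alloc_work_entries(cm_id_priv, 4)
 *	cm_conn_req_handler():	alloc_work_entries(cm_id_priv, 3)
 *
 * so a listening id can queue one event per backlog slot, while active
 * and passive ids only ever need the small fixed set of events listed
 * above.
 */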

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}
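
/*
 * Illustrative note (not compiled): once copy_private_data() has
 * replaced event->private_data with its own copy, that buffer belongs
 * to the IWCM and is released by the work handlers after the client
 * callback has run, e.g.
 *
 *	if (iw_event->private_data_len)
 *		kfree(iw_event->private_data);
 *
 * as done in cm_conn_req_handler() and cm_conn_rep_handler() below.
 */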

/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		complete(&cm_id_priv->destroy_comp);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
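
/*
 * Usage sketch (hypothetical client code, not part of this file): a
 * kernel client supplies its own event handler and context when it
 * creates a cm_id; returning non-zero from that handler asks the IWCM
 * to destroy the cm_id (see cm_work_handler() below).
 *
 *	static int my_cm_handler(struct iw_cm_id *cm_id,
 *				 struct iw_cm_event *event)
 *	{
 *		return 0;
 *	}
 *
 *	cm_id = iw_create_cm_id(device, my_cm_handler, my_context);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */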

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
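
/*
 * Usage sketch (hypothetical caller, not part of this file): the abrupt
 * flag selects which of the QP transitions above is attempted.
 *
 *	iw_cm_disconnect(cm_id, 0);	graceful close, QP moves to SQD
 *	iw_cm_disconnect(cm_id, 1);	abortive close, QP moves to ERR
 */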

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned non-zero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
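
/*
 * Usage sketch (hypothetical listener, not part of this file): the
 * caller typically fills in cm_id->local_addr before listening; the
 * backlog passed here also sizes the pre-allocated work element pool.
 *
 *	cm_id->local_addr = my_sockaddr_in;
 *	ret = iw_cm_listen(cm_id, 16);
 *	if (ret)
 *		iw_destroy_cm_id(cm_id);
 */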

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
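
/*
 * Usage sketch (hypothetical callback code, not part of this file): a
 * client typically accepts from its CONNECT_REQUEST handler, passing
 * the QP number of a QP it has already created on the same device.
 *
 *	struct iw_cm_conn_param param = {
 *		.ord = 1,
 *		.ird = 1,
 *		.qpn = my_qp->qp_num,
 *	};
 *
 *	ret = iw_cm_accept(cm_id, &param);
 */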

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
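
/*
 * Usage sketch (hypothetical active-side caller, not part of this
 * file): the caller fills in the address fields on the cm_id and
 * supplies connection parameters much as for iw_cm_accept() above; on
 * error the code above has already returned the cm_id to IDLE.
 *
 *	cm_id->remote_addr = server_sockaddr_in;
 *	ret = iw_cm_connect(cm_id, &param);
 */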

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the new cm_id is created. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount) == 0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
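
/*
 * Illustrative event flow (summary only, not compiled): an event
 * delivered by the provider travels through
 *
 *	provider upcall -> cm_event_handler()	queue event, take a reference
 *	iwcm_wq thread  -> cm_work_handler()	drain the cm_id's work_list
 *	                -> process_event()	dispatch on the event type
 *	                -> cm_*_handler()	update state, call the client
 */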

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	return 0;
}
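
/*
 * Usage sketch (hypothetical caller, not part of this file): a client
 * normally lets iw_cm_init_qp_attr() above fill in the attributes for a
 * QP transition and then applies them with ib_modify_qp().
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */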

static void __exit iw_cm_cleanup(void)
{
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);