/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
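/*
 * Timewait bookkeeping embeds a cm_work as its first member so the
 * delayed-work callback can recover the cm_timewait_info by casting;
 * the rb_nodes link the entry into the remote id/qpn lookup trees
 * while the connection drains.
 */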
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
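/*
 * Allocate a MAD send buffer addressed with the cm_id's av.  The buffer
 * holds a reference on the cm_id (dropped in cm_free_msg()) so the id
 * cannot be freed while a send is outstanding.
 */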
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id, &id);
		if (!ret)
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}
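/*
 * Local IDs handed out on the wire are the idr index XORed with a
 * random operand, so lookups must undo the XOR before consulting the
 * idr.  cm_get_id() also checks the remote id to filter stale matches.
 */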
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
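/*
 * The listen tree is ordered by device, then service id, then masked
 * compare data.  Insertion returns the existing entry when a new listen
 * would match the same requests, letting the caller fail with -EBUSY
 * instead of shadowing an existing listener.
 */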
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
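/*
 * Timewait entries outlive the cm_id that created them:
 * cm_enter_timewait() hands the cm_timewait_info to the workqueue and
 * clears the pointer, and the delayed work fires roughly
 * packet_life_time + 1 (converted to ms) later to raise
 * IB_CM_TIMEWAIT_EXIT.
 */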
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
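/*
 * Tear down a cm_id from any state: cancel outstanding MADs, send the
 * REJ/DREQ/DREP appropriate to the current state, then wait for all
 * references to drop before freeing queued work and private data.
 */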
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
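/*
 * A zero service_mask listens for an exact service_id match; passing
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused id.  Optional
 * compare_data further restricts matches to REQs whose masked private
 * data is equal.
 */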
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
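/*
 * CM transaction IDs pack the MAD agent's hi_tid into the upper 32 bits
 * and the local id into the low word, with the message sequence shifted
 * into the high bits of the low word (msg_seq << 30) so concurrent
 * exchanges on one cm_id get distinct TIDs.
 */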
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
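/*
 * Sending a REQ allocates the timewait entry up front and derives the
 * CM timeout from twice the path's packet lifetime plus the remote CM's
 * response timeout, so retries span the worst-case round trip.
 */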
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
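/*
 * Issue a REJ for a message that never matched a cm_id, reflecting the
 * comm ids and TID from the received MAD; only the common header fields
 * are needed, so any CM message can be cast to cm_rej_msg here.
 */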
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
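/*
 * Decode the path records from a received REQ.  The sender's "local"
 * fields become our remote end, so LIDs and GIDs are swapped, and the
 * ack timeout is converted back to a packet lifetime by subtracting the
 * +1 the active side added.
 */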
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
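/*
 * Which message is being rejected, and whether we have a local comm id
 * to report, depends on how far the handshake progressed: a REQ
 * rejected before any response was sent carries local_comm_id 0.
 */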
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
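/*
 * Match an incoming REQ against the tables: the remote id tree catches
 * duplicate REQs still being serviced, the remote qpn tree catches
 * stale connections (which are rejected), and only then is the listen
 * tree searched for a service to hand the request to.
 */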
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}
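/*
 * Handle a new REQ: create a fresh cm_id for the connection, inherit
 * the handler and context from the matched listen id, resolve the
 * forward paths, and deliver the request event to the consumer.
 */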
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
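/*
 * A duplicate REP means our earlier RTU or MRA may have been lost;
 * resend whichever of the two matches the current state so the REP
 * sender can make progress.
 */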
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
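/*
 * Note that a DREQ that cannot be allocated or posted still moves the
 * connection into timewait; disconnect is not allowed to stall once the
 * id is established.
 */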
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

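/*
 * Handle an incoming DREQ: depending on the local state this cancels an
 * outstanding REP/DREQ, re-sends a DREP from timewait, or queues an
 * IB_CM_DREQ_RECEIVED event so the consumer can answer with
 * ib_send_cm_drep().
 */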
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

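/*
 * Illustrative usage, not part of the original file: either side may
 * reject a connection while it is being set up, e.g. from a REQ or REP
 * event handler:
 *
 *	ret = ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *			     NULL, 0, NULL, 0);
 */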
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

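/*
 * Illustrative usage, not part of the original file: a receiver that
 * needs more time to service a REQ or REP can delay the sender's
 * retries with
 *
 *	ret = ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 *
 * where service_timeout is the 5-bit encoded delay defined by the IB
 * spec (see cm_convert_to_ms()).
 */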
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

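/*
 * Handle an incoming MRA: verify it acknowledges the message we last
 * sent for the current state, then stretch that MAD's timeout by the
 * peer's advertised service timeout plus one packet lifetime.
 */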
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

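/*
 * Alternate path loading: ib_send_cm_lap() below lets the active side
 * propose a new path for an established connection; the peer answers
 * with an APR, handled further down.
 */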
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

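/*
 * Handle an incoming LAP: hand the proposed alternate path to the
 * consumer as an IB_CM_LAP_RECEIVED event, or re-send a stored MRA if
 * the consumer already answered this LAP with one.
 */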
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

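/*
 * Illustrative usage, not part of the original file: a consumer
 * accepting the path from an IB_CM_LAP_RECEIVED event might reply
 *
 *	ret = ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 */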
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

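/*
 * Illustrative usage, not part of the original file: resolving the QPN
 * of a service without forming a connection might look like
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path = path_rec,
 *		.service_id = service_id,
 *		.timeout_ms = 1000,
 *		.max_cm_retries = 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *
 * with the result delivered as an IB_CM_SIDR_REP_RECEIVED event.
 */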
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

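/*
 * Handle an incoming SIDR REQ: create a temporary cm_id, match the
 * request against the listen service table, and deliver an
 * IB_CM_SIDR_REQ_RECEIVED event on the listener's behalf.
 */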
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

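/*
 * Illustrative usage of ib_send_cm_sidr_rep() above, not part of the
 * original file: a listener handling IB_CM_SIDR_REQ_RECEIVED typically
 * replies with the QPN and Q_Key of the service:
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.status = IB_SIDR_SUCCESS,
 *		.qp_num = qp->qp_num,
 *		.qkey = qkey,
 *	};
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */
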
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

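/*
 * A failed or timed-out send is surfaced to the consumer as the
 * matching *_ERROR event; the helpers below run from the MAD agent's
 * send completion handler.
 */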
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

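/*
 * cm_establish() and cm_migrate() below back ib_cm_notify().
 * Illustrative usage, not part of the original file: a QP event handler
 * that sees IB_EVENT_COMM_EST before the RTU arrives can tell the CM
 * directly:
 *
 *	ib_cm_notify(cm_id, event->event);
 */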
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_delayed_work(cm.wq, &work->work, 0);
}

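/*
 * The three helpers below fill in the QP attributes that
 * ib_cm_init_qp_attr() returns for the INIT, RTR, and RTS transitions
 * respectively.
 */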
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
					cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

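/*
 * Illustrative usage, not part of the original file: consumers drive
 * their QP through INIT -> RTR -> RTS with attributes supplied by
 * ib_cm_init_qp_attr() (defined below), e.g.
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */
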
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout =
					cm_id_priv->av.packet_life_time + 1;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
				cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

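/*
 * Module init: the workqueue must exist before the client is
 * registered, since ib_register_client() can invoke cm_add_one() for
 * existing devices and MADs may start arriving immediately.
 */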
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);