1 /* 2 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. 3 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/completion.h> 37 #include <linux/dma-mapping.h> 38 #include <linux/device.h> 39 #include <linux/module.h> 40 #include <linux/err.h> 41 #include <linux/idr.h> 42 #include <linux/interrupt.h> 43 #include <linux/random.h> 44 #include <linux/rbtree.h> 45 #include <linux/spinlock.h> 46 #include <linux/slab.h> 47 #include <linux/sysfs.h> 48 #include <linux/workqueue.h> 49 #include <linux/kdev_t.h> 50 #include <linux/etherdevice.h> 51 52 #include <rdma/ib_cache.h> 53 #include <rdma/ib_cm.h> 54 #include "cm_msgs.h" 55 56 MODULE_AUTHOR("Sean Hefty"); 57 MODULE_DESCRIPTION("InfiniBand CM"); 58 MODULE_LICENSE("Dual BSD/GPL"); 59 60 static void cm_add_one(struct ib_device *device); 61 static void cm_remove_one(struct ib_device *device, void *client_data); 62 63 static struct ib_client cm_client = { 64 .name = "cm", 65 .add = cm_add_one, 66 .remove = cm_remove_one 67 }; 68 69 static struct ib_cm { 70 spinlock_t lock; 71 struct list_head device_list; 72 rwlock_t device_lock; 73 struct rb_root listen_service_table; 74 u64 listen_service_id; 75 /* struct rb_root peer_service_table; todo: fix peer to peer */ 76 struct rb_root remote_qp_table; 77 struct rb_root remote_id_table; 78 struct rb_root remote_sidr_table; 79 struct idr local_id_table; 80 __be32 random_id_operand; 81 struct list_head timewait_list; 82 struct workqueue_struct *wq; 83 /* Sync on cm change port state */ 84 spinlock_t state_lock; 85 } cm; 86 87 /* Counter indexes ordered by attribute ID */ 88 enum { 89 CM_REQ_COUNTER, 90 CM_MRA_COUNTER, 91 CM_REJ_COUNTER, 92 CM_REP_COUNTER, 93 CM_RTU_COUNTER, 94 CM_DREQ_COUNTER, 95 CM_DREP_COUNTER, 96 CM_SIDR_REQ_COUNTER, 97 CM_SIDR_REP_COUNTER, 98 CM_LAP_COUNTER, 99 CM_APR_COUNTER, 100 CM_ATTR_COUNT, 101 CM_ATTR_ID_OFFSET = 0x0010, 102 }; 103 104 enum { 105 CM_XMIT, 106 CM_XMIT_RETRIES, 107 CM_RECV, 108 CM_RECV_DUPLICATES, 109 CM_COUNTER_GROUPS 110 }; 111 112 static char 
const counter_group_names[CM_COUNTER_GROUPS] 113 [sizeof("cm_rx_duplicates")] = { 114 "cm_tx_msgs", "cm_tx_retries", 115 "cm_rx_msgs", "cm_rx_duplicates" 116 }; 117 118 struct cm_counter_group { 119 struct kobject obj; 120 atomic_long_t counter[CM_ATTR_COUNT]; 121 }; 122 123 struct cm_counter_attribute { 124 struct attribute attr; 125 int index; 126 }; 127 128 #define CM_COUNTER_ATTR(_name, _index) \ 129 struct cm_counter_attribute cm_##_name##_counter_attr = { \ 130 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 131 .index = _index \ 132 } 133 134 static CM_COUNTER_ATTR(req, CM_REQ_COUNTER); 135 static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER); 136 static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER); 137 static CM_COUNTER_ATTR(rep, CM_REP_COUNTER); 138 static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER); 139 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER); 140 static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER); 141 static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER); 142 static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER); 143 static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER); 144 static CM_COUNTER_ATTR(apr, CM_APR_COUNTER); 145 146 static struct attribute *cm_counter_default_attrs[] = { 147 &cm_req_counter_attr.attr, 148 &cm_mra_counter_attr.attr, 149 &cm_rej_counter_attr.attr, 150 &cm_rep_counter_attr.attr, 151 &cm_rtu_counter_attr.attr, 152 &cm_dreq_counter_attr.attr, 153 &cm_drep_counter_attr.attr, 154 &cm_sidr_req_counter_attr.attr, 155 &cm_sidr_rep_counter_attr.attr, 156 &cm_lap_counter_attr.attr, 157 &cm_apr_counter_attr.attr, 158 NULL 159 }; 160 161 struct cm_port { 162 struct cm_device *cm_dev; 163 struct ib_mad_agent *mad_agent; 164 struct kobject port_obj; 165 u8 port_num; 166 struct list_head cm_priv_prim_list; 167 struct list_head cm_priv_altr_list; 168 struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; 169 }; 170 171 struct cm_device { 172 struct list_head list; 173 struct ib_device *ib_device; 174 struct device *device; 175 u8 ack_delay; 176 int going_down; 177 struct cm_port *port[0]; 178 }; 179 180 struct cm_av { 181 struct cm_port *port; 182 union ib_gid dgid; 183 struct ib_ah_attr ah_attr; 184 u16 pkey_index; 185 u8 timeout; 186 }; 187 188 struct cm_work { 189 struct delayed_work work; 190 struct list_head list; 191 struct cm_port *port; 192 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 193 __be32 local_id; /* Established / timewait */ 194 __be32 remote_id; 195 struct ib_cm_event cm_event; 196 struct ib_sa_path_rec path[0]; 197 }; 198 199 struct cm_timewait_info { 200 struct cm_work work; /* Must be first. */ 201 struct list_head list; 202 struct rb_node remote_qp_node; 203 struct rb_node remote_id_node; 204 __be64 remote_ca_guid; 205 __be32 remote_qpn; 206 u8 inserted_remote_qp; 207 u8 inserted_remote_id; 208 }; 209 210 struct cm_id_private { 211 struct ib_cm_id id; 212 213 struct rb_node service_node; 214 struct rb_node sidr_id_node; 215 spinlock_t lock; /* Do not acquire inside cm.lock */ 216 struct completion comp; 217 atomic_t refcount; 218 /* Number of clients sharing this ib_cm_id. Only valid for listeners. 219 * Protected by the cm.lock spinlock. 
	 */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad agent is registered and the av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not a valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad agent yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected.
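	 * Senders that expect a reply (e.g. ib_send_cm_req() and
	 * ib_send_cm_rep() below) fill in msg->timeout_ms and msg->context[1]
	 * before posting the MAD.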
*/ 316 m->ah = ah; 317 m->retries = cm_id_priv->max_cm_retries; 318 319 atomic_inc(&cm_id_priv->refcount); 320 m->context[0] = cm_id_priv; 321 *msg = m; 322 323 out: 324 spin_unlock_irqrestore(&cm.state_lock, flags2); 325 return ret; 326 } 327 328 static int cm_alloc_response_msg(struct cm_port *port, 329 struct ib_mad_recv_wc *mad_recv_wc, 330 struct ib_mad_send_buf **msg) 331 { 332 struct ib_mad_send_buf *m; 333 struct ib_ah *ah; 334 335 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, 336 mad_recv_wc->recv_buf.grh, port->port_num); 337 if (IS_ERR(ah)) 338 return PTR_ERR(ah); 339 340 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 341 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 342 GFP_ATOMIC, 343 IB_MGMT_BASE_VERSION); 344 if (IS_ERR(m)) { 345 ib_destroy_ah(ah); 346 return PTR_ERR(m); 347 } 348 m->ah = ah; 349 *msg = m; 350 return 0; 351 } 352 353 static void cm_free_msg(struct ib_mad_send_buf *msg) 354 { 355 ib_destroy_ah(msg->ah); 356 if (msg->context[0]) 357 cm_deref_id(msg->context[0]); 358 ib_free_send_mad(msg); 359 } 360 361 static void * cm_copy_private_data(const void *private_data, 362 u8 private_data_len) 363 { 364 void *data; 365 366 if (!private_data || !private_data_len) 367 return NULL; 368 369 data = kmemdup(private_data, private_data_len, GFP_KERNEL); 370 if (!data) 371 return ERR_PTR(-ENOMEM); 372 373 return data; 374 } 375 376 static void cm_set_private_data(struct cm_id_private *cm_id_priv, 377 void *private_data, u8 private_data_len) 378 { 379 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 380 kfree(cm_id_priv->private_data); 381 382 cm_id_priv->private_data = private_data; 383 cm_id_priv->private_data_len = private_data_len; 384 } 385 386 static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, 387 struct ib_grh *grh, struct cm_av *av) 388 { 389 av->port = port; 390 av->pkey_index = wc->pkey_index; 391 ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc, 392 grh, &av->ah_attr); 393 } 394 395 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, 396 struct cm_id_private *cm_id_priv) 397 { 398 struct cm_device *cm_dev; 399 struct cm_port *port = NULL; 400 unsigned long flags; 401 int ret; 402 u8 p; 403 struct net_device *ndev = ib_get_ndev_from_path(path); 404 405 read_lock_irqsave(&cm.device_lock, flags); 406 list_for_each_entry(cm_dev, &cm.device_list, list) { 407 if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid, 408 path->gid_type, ndev, &p, NULL)) { 409 port = cm_dev->port[p-1]; 410 break; 411 } 412 } 413 read_unlock_irqrestore(&cm.device_lock, flags); 414 415 if (ndev) 416 dev_put(ndev); 417 418 if (!port) 419 return -EINVAL; 420 421 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, 422 be16_to_cpu(path->pkey), &av->pkey_index); 423 if (ret) 424 return ret; 425 426 av->port = port; 427 ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, 428 &av->ah_attr); 429 av->timeout = path->packet_life_time + 1; 430 431 spin_lock_irqsave(&cm.lock, flags); 432 if (&cm_id_priv->av == av) 433 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); 434 else if (&cm_id_priv->alt_av == av) 435 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); 436 else 437 ret = -EINVAL; 438 439 spin_unlock_irqrestore(&cm.lock, flags); 440 441 return ret; 442 } 443 444 static int cm_alloc_id(struct cm_id_private *cm_id_priv) 445 { 446 unsigned long flags; 447 int id; 448 449 idr_preload(GFP_KERNEL); 450 spin_lock_irqsave(&cm.lock, flags); 451 
452 id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT); 453 454 spin_unlock_irqrestore(&cm.lock, flags); 455 idr_preload_end(); 456 457 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; 458 return id < 0 ? id : 0; 459 } 460 461 static void cm_free_id(__be32 local_id) 462 { 463 spin_lock_irq(&cm.lock); 464 idr_remove(&cm.local_id_table, 465 (__force int) (local_id ^ cm.random_id_operand)); 466 spin_unlock_irq(&cm.lock); 467 } 468 469 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) 470 { 471 struct cm_id_private *cm_id_priv; 472 473 cm_id_priv = idr_find(&cm.local_id_table, 474 (__force int) (local_id ^ cm.random_id_operand)); 475 if (cm_id_priv) { 476 if (cm_id_priv->id.remote_id == remote_id) 477 atomic_inc(&cm_id_priv->refcount); 478 else 479 cm_id_priv = NULL; 480 } 481 482 return cm_id_priv; 483 } 484 485 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) 486 { 487 struct cm_id_private *cm_id_priv; 488 489 spin_lock_irq(&cm.lock); 490 cm_id_priv = cm_get_id(local_id, remote_id); 491 spin_unlock_irq(&cm.lock); 492 493 return cm_id_priv; 494 } 495 496 /* 497 * Trivial helpers to strip endian annotation and compare; the 498 * endianness doesn't actually matter since we just need a stable 499 * order for the RB tree. 500 */ 501 static int be32_lt(__be32 a, __be32 b) 502 { 503 return (__force u32) a < (__force u32) b; 504 } 505 506 static int be32_gt(__be32 a, __be32 b) 507 { 508 return (__force u32) a > (__force u32) b; 509 } 510 511 static int be64_lt(__be64 a, __be64 b) 512 { 513 return (__force u64) a < (__force u64) b; 514 } 515 516 static int be64_gt(__be64 a, __be64 b) 517 { 518 return (__force u64) a > (__force u64) b; 519 } 520 521 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) 522 { 523 struct rb_node **link = &cm.listen_service_table.rb_node; 524 struct rb_node *parent = NULL; 525 struct cm_id_private *cur_cm_id_priv; 526 __be64 service_id = cm_id_priv->id.service_id; 527 __be64 service_mask = cm_id_priv->id.service_mask; 528 529 while (*link) { 530 parent = *link; 531 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 532 service_node); 533 if ((cur_cm_id_priv->id.service_mask & service_id) == 534 (service_mask & cur_cm_id_priv->id.service_id) && 535 (cm_id_priv->id.device == cur_cm_id_priv->id.device)) 536 return cur_cm_id_priv; 537 538 if (cm_id_priv->id.device < cur_cm_id_priv->id.device) 539 link = &(*link)->rb_left; 540 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) 541 link = &(*link)->rb_right; 542 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) 543 link = &(*link)->rb_left; 544 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) 545 link = &(*link)->rb_right; 546 else 547 link = &(*link)->rb_right; 548 } 549 rb_link_node(&cm_id_priv->service_node, parent, link); 550 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); 551 return NULL; 552 } 553 554 static struct cm_id_private * cm_find_listen(struct ib_device *device, 555 __be64 service_id) 556 { 557 struct rb_node *node = cm.listen_service_table.rb_node; 558 struct cm_id_private *cm_id_priv; 559 560 while (node) { 561 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); 562 if ((cm_id_priv->id.service_mask & service_id) == 563 cm_id_priv->id.service_id && 564 (cm_id_priv->id.device == device)) 565 return cm_id_priv; 566 567 if (device < cm_id_priv->id.device) 568 node = node->rb_left; 569 else if (device > 
cm_id_priv->id.device) 570 node = node->rb_right; 571 else if (be64_lt(service_id, cm_id_priv->id.service_id)) 572 node = node->rb_left; 573 else if (be64_gt(service_id, cm_id_priv->id.service_id)) 574 node = node->rb_right; 575 else 576 node = node->rb_right; 577 } 578 return NULL; 579 } 580 581 static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info 582 *timewait_info) 583 { 584 struct rb_node **link = &cm.remote_id_table.rb_node; 585 struct rb_node *parent = NULL; 586 struct cm_timewait_info *cur_timewait_info; 587 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 588 __be32 remote_id = timewait_info->work.remote_id; 589 590 while (*link) { 591 parent = *link; 592 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 593 remote_id_node); 594 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) 595 link = &(*link)->rb_left; 596 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) 597 link = &(*link)->rb_right; 598 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 599 link = &(*link)->rb_left; 600 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 601 link = &(*link)->rb_right; 602 else 603 return cur_timewait_info; 604 } 605 timewait_info->inserted_remote_id = 1; 606 rb_link_node(&timewait_info->remote_id_node, parent, link); 607 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); 608 return NULL; 609 } 610 611 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, 612 __be32 remote_id) 613 { 614 struct rb_node *node = cm.remote_id_table.rb_node; 615 struct cm_timewait_info *timewait_info; 616 617 while (node) { 618 timewait_info = rb_entry(node, struct cm_timewait_info, 619 remote_id_node); 620 if (be32_lt(remote_id, timewait_info->work.remote_id)) 621 node = node->rb_left; 622 else if (be32_gt(remote_id, timewait_info->work.remote_id)) 623 node = node->rb_right; 624 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) 625 node = node->rb_left; 626 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) 627 node = node->rb_right; 628 else 629 return timewait_info; 630 } 631 return NULL; 632 } 633 634 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info 635 *timewait_info) 636 { 637 struct rb_node **link = &cm.remote_qp_table.rb_node; 638 struct rb_node *parent = NULL; 639 struct cm_timewait_info *cur_timewait_info; 640 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 641 __be32 remote_qpn = timewait_info->remote_qpn; 642 643 while (*link) { 644 parent = *link; 645 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 646 remote_qp_node); 647 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) 648 link = &(*link)->rb_left; 649 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) 650 link = &(*link)->rb_right; 651 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 652 link = &(*link)->rb_left; 653 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 654 link = &(*link)->rb_right; 655 else 656 return cur_timewait_info; 657 } 658 timewait_info->inserted_remote_qp = 1; 659 rb_link_node(&timewait_info->remote_qp_node, parent, link); 660 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); 661 return NULL; 662 } 663 664 static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private 665 *cm_id_priv) 666 { 667 struct rb_node **link = &cm.remote_sidr_table.rb_node; 668 struct rb_node *parent = NULL; 669 struct cm_id_private *cur_cm_id_priv; 
670 union ib_gid *port_gid = &cm_id_priv->av.dgid; 671 __be32 remote_id = cm_id_priv->id.remote_id; 672 673 while (*link) { 674 parent = *link; 675 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 676 sidr_id_node); 677 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) 678 link = &(*link)->rb_left; 679 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) 680 link = &(*link)->rb_right; 681 else { 682 int cmp; 683 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, 684 sizeof *port_gid); 685 if (cmp < 0) 686 link = &(*link)->rb_left; 687 else if (cmp > 0) 688 link = &(*link)->rb_right; 689 else 690 return cur_cm_id_priv; 691 } 692 } 693 rb_link_node(&cm_id_priv->sidr_id_node, parent, link); 694 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 695 return NULL; 696 } 697 698 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, 699 enum ib_cm_sidr_status status) 700 { 701 struct ib_cm_sidr_rep_param param; 702 703 memset(¶m, 0, sizeof param); 704 param.status = status; 705 ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); 706 } 707 708 struct ib_cm_id *ib_create_cm_id(struct ib_device *device, 709 ib_cm_handler cm_handler, 710 void *context) 711 { 712 struct cm_id_private *cm_id_priv; 713 int ret; 714 715 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); 716 if (!cm_id_priv) 717 return ERR_PTR(-ENOMEM); 718 719 cm_id_priv->id.state = IB_CM_IDLE; 720 cm_id_priv->id.device = device; 721 cm_id_priv->id.cm_handler = cm_handler; 722 cm_id_priv->id.context = context; 723 cm_id_priv->id.remote_cm_qpn = 1; 724 ret = cm_alloc_id(cm_id_priv); 725 if (ret) 726 goto error; 727 728 spin_lock_init(&cm_id_priv->lock); 729 init_completion(&cm_id_priv->comp); 730 INIT_LIST_HEAD(&cm_id_priv->work_list); 731 INIT_LIST_HEAD(&cm_id_priv->prim_list); 732 INIT_LIST_HEAD(&cm_id_priv->altr_list); 733 atomic_set(&cm_id_priv->work_count, -1); 734 atomic_set(&cm_id_priv->refcount, 1); 735 return &cm_id_priv->id; 736 737 error: 738 kfree(cm_id_priv); 739 return ERR_PTR(-ENOMEM); 740 } 741 EXPORT_SYMBOL(ib_create_cm_id); 742 743 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) 744 { 745 struct cm_work *work; 746 747 if (list_empty(&cm_id_priv->work_list)) 748 return NULL; 749 750 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); 751 list_del(&work->list); 752 return work; 753 } 754 755 static void cm_free_work(struct cm_work *work) 756 { 757 if (work->mad_recv_wc) 758 ib_free_recv_mad(work->mad_recv_wc); 759 kfree(work); 760 } 761 762 static inline int cm_convert_to_ms(int iba_time) 763 { 764 /* approximate conversion to ms from 4.096us x 2^iba_time */ 765 return 1 << max(iba_time - 8, 0); 766 } 767 768 /* 769 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time 770 * Because of how ack_timeout is stored, adding one doubles the timeout. 771 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and 772 * increment it (round up) only if the other is within 50%. 
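 * For example, ack_delay = 16 with life_time = 15 gives 17 (the two terms are
 * equal), while ack_delay = 10 with life_time = 15 stays at 16 (2^10 is
 * negligible next to 2^16).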
773 */ 774 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) 775 { 776 int ack_timeout = packet_life_time + 1; 777 778 if (ack_timeout >= ca_ack_delay) 779 ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); 780 else 781 ack_timeout = ca_ack_delay + 782 (ack_timeout >= (ca_ack_delay - 1)); 783 784 return min(31, ack_timeout); 785 } 786 787 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) 788 { 789 if (timewait_info->inserted_remote_id) { 790 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); 791 timewait_info->inserted_remote_id = 0; 792 } 793 794 if (timewait_info->inserted_remote_qp) { 795 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); 796 timewait_info->inserted_remote_qp = 0; 797 } 798 } 799 800 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) 801 { 802 struct cm_timewait_info *timewait_info; 803 804 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); 805 if (!timewait_info) 806 return ERR_PTR(-ENOMEM); 807 808 timewait_info->work.local_id = local_id; 809 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); 810 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 811 return timewait_info; 812 } 813 814 static void cm_enter_timewait(struct cm_id_private *cm_id_priv) 815 { 816 int wait_time; 817 unsigned long flags; 818 struct cm_device *cm_dev; 819 820 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); 821 if (!cm_dev) 822 return; 823 824 spin_lock_irqsave(&cm.lock, flags); 825 cm_cleanup_timewait(cm_id_priv->timewait_info); 826 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); 827 spin_unlock_irqrestore(&cm.lock, flags); 828 829 /* 830 * The cm_id could be destroyed by the user before we exit timewait. 831 * To protect against this, we search for the cm_id after exiting 832 * timewait before notifying the user that we've exited timewait. 833 */ 834 cm_id_priv->id.state = IB_CM_TIMEWAIT; 835 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); 836 837 /* Check if the device started its remove_one */ 838 spin_lock_irqsave(&cm.lock, flags); 839 if (!cm_dev->going_down) 840 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 841 msecs_to_jiffies(wait_time)); 842 spin_unlock_irqrestore(&cm.lock, flags); 843 844 cm_id_priv->timewait_info = NULL; 845 } 846 847 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) 848 { 849 unsigned long flags; 850 851 cm_id_priv->id.state = IB_CM_IDLE; 852 if (cm_id_priv->timewait_info) { 853 spin_lock_irqsave(&cm.lock, flags); 854 cm_cleanup_timewait(cm_id_priv->timewait_info); 855 spin_unlock_irqrestore(&cm.lock, flags); 856 kfree(cm_id_priv->timewait_info); 857 cm_id_priv->timewait_info = NULL; 858 } 859 } 860 861 static void cm_destroy_id(struct ib_cm_id *cm_id, int err) 862 { 863 struct cm_id_private *cm_id_priv; 864 struct cm_work *work; 865 866 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 867 retest: 868 spin_lock_irq(&cm_id_priv->lock); 869 switch (cm_id->state) { 870 case IB_CM_LISTEN: 871 spin_unlock_irq(&cm_id_priv->lock); 872 873 spin_lock_irq(&cm.lock); 874 if (--cm_id_priv->listen_sharecount > 0) { 875 /* The id is still shared. 
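			 * Another ib_cm_insert_listen() user still holds it, so
			 * just drop this reference and leave the listen in the
			 * service table.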
			 */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
1049 */ 1050 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, 1051 ib_cm_handler cm_handler, 1052 __be64 service_id) 1053 { 1054 struct cm_id_private *cm_id_priv; 1055 struct ib_cm_id *cm_id; 1056 unsigned long flags; 1057 int err = 0; 1058 1059 /* Create an ID in advance, since the creation may sleep */ 1060 cm_id = ib_create_cm_id(device, cm_handler, NULL); 1061 if (IS_ERR(cm_id)) 1062 return cm_id; 1063 1064 spin_lock_irqsave(&cm.lock, flags); 1065 1066 if (service_id == IB_CM_ASSIGN_SERVICE_ID) 1067 goto new_id; 1068 1069 /* Find an existing ID */ 1070 cm_id_priv = cm_find_listen(device, service_id); 1071 if (cm_id_priv) { 1072 if (cm_id->cm_handler != cm_handler || cm_id->context) { 1073 /* Sharing an ib_cm_id with different handlers is not 1074 * supported */ 1075 spin_unlock_irqrestore(&cm.lock, flags); 1076 return ERR_PTR(-EINVAL); 1077 } 1078 atomic_inc(&cm_id_priv->refcount); 1079 ++cm_id_priv->listen_sharecount; 1080 spin_unlock_irqrestore(&cm.lock, flags); 1081 1082 ib_destroy_cm_id(cm_id); 1083 cm_id = &cm_id_priv->id; 1084 return cm_id; 1085 } 1086 1087 new_id: 1088 /* Use newly created ID */ 1089 err = __ib_cm_listen(cm_id, service_id, 0); 1090 1091 spin_unlock_irqrestore(&cm.lock, flags); 1092 1093 if (err) { 1094 ib_destroy_cm_id(cm_id); 1095 return ERR_PTR(err); 1096 } 1097 return cm_id; 1098 } 1099 EXPORT_SYMBOL(ib_cm_insert_listen); 1100 1101 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, 1102 enum cm_msg_sequence msg_seq) 1103 { 1104 u64 hi_tid, low_tid; 1105 1106 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; 1107 low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | 1108 (msg_seq << 30)); 1109 return cpu_to_be64(hi_tid | low_tid); 1110 } 1111 1112 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 1113 __be16 attr_id, __be64 tid) 1114 { 1115 hdr->base_version = IB_MGMT_BASE_VERSION; 1116 hdr->mgmt_class = IB_MGMT_CLASS_CM; 1117 hdr->class_version = IB_CM_CLASS_VERSION; 1118 hdr->method = IB_MGMT_METHOD_SEND; 1119 hdr->attr_id = attr_id; 1120 hdr->tid = tid; 1121 } 1122 1123 static void cm_format_req(struct cm_req_msg *req_msg, 1124 struct cm_id_private *cm_id_priv, 1125 struct ib_cm_req_param *param) 1126 { 1127 struct ib_sa_path_rec *pri_path = param->primary_path; 1128 struct ib_sa_path_rec *alt_path = param->alternate_path; 1129 1130 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, 1131 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); 1132 1133 req_msg->local_comm_id = cm_id_priv->id.local_id; 1134 req_msg->service_id = param->service_id; 1135 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; 1136 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); 1137 cm_req_set_init_depth(req_msg, param->initiator_depth); 1138 cm_req_set_remote_resp_timeout(req_msg, 1139 param->remote_cm_response_timeout); 1140 cm_req_set_qp_type(req_msg, param->qp_type); 1141 cm_req_set_flow_ctrl(req_msg, param->flow_control); 1142 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); 1143 cm_req_set_local_resp_timeout(req_msg, 1144 param->local_cm_response_timeout); 1145 req_msg->pkey = param->primary_path->pkey; 1146 cm_req_set_path_mtu(req_msg, param->primary_path->mtu); 1147 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); 1148 1149 if (param->qp_type != IB_QPT_XRC_INI) { 1150 cm_req_set_resp_res(req_msg, param->responder_resources); 1151 cm_req_set_retry_count(req_msg, param->retry_count); 1152 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); 1153 cm_req_set_srq(req_msg, param->srq); 1154 } 
1155 1156 if (pri_path->hop_limit <= 1) { 1157 req_msg->primary_local_lid = pri_path->slid; 1158 req_msg->primary_remote_lid = pri_path->dlid; 1159 } else { 1160 /* Work-around until there's a way to obtain remote LID info */ 1161 req_msg->primary_local_lid = IB_LID_PERMISSIVE; 1162 req_msg->primary_remote_lid = IB_LID_PERMISSIVE; 1163 } 1164 req_msg->primary_local_gid = pri_path->sgid; 1165 req_msg->primary_remote_gid = pri_path->dgid; 1166 cm_req_set_primary_flow_label(req_msg, pri_path->flow_label); 1167 cm_req_set_primary_packet_rate(req_msg, pri_path->rate); 1168 req_msg->primary_traffic_class = pri_path->traffic_class; 1169 req_msg->primary_hop_limit = pri_path->hop_limit; 1170 cm_req_set_primary_sl(req_msg, pri_path->sl); 1171 cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1)); 1172 cm_req_set_primary_local_ack_timeout(req_msg, 1173 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 1174 pri_path->packet_life_time)); 1175 1176 if (alt_path) { 1177 if (alt_path->hop_limit <= 1) { 1178 req_msg->alt_local_lid = alt_path->slid; 1179 req_msg->alt_remote_lid = alt_path->dlid; 1180 } else { 1181 req_msg->alt_local_lid = IB_LID_PERMISSIVE; 1182 req_msg->alt_remote_lid = IB_LID_PERMISSIVE; 1183 } 1184 req_msg->alt_local_gid = alt_path->sgid; 1185 req_msg->alt_remote_gid = alt_path->dgid; 1186 cm_req_set_alt_flow_label(req_msg, 1187 alt_path->flow_label); 1188 cm_req_set_alt_packet_rate(req_msg, alt_path->rate); 1189 req_msg->alt_traffic_class = alt_path->traffic_class; 1190 req_msg->alt_hop_limit = alt_path->hop_limit; 1191 cm_req_set_alt_sl(req_msg, alt_path->sl); 1192 cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1)); 1193 cm_req_set_alt_local_ack_timeout(req_msg, 1194 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 1195 alt_path->packet_life_time)); 1196 } 1197 1198 if (param->private_data && param->private_data_len) 1199 memcpy(req_msg->private_data, param->private_data, 1200 param->private_data_len); 1201 } 1202 1203 static int cm_validate_req_param(struct ib_cm_req_param *param) 1204 { 1205 /* peer-to-peer not supported */ 1206 if (param->peer_to_peer) 1207 return -EINVAL; 1208 1209 if (!param->primary_path) 1210 return -EINVAL; 1211 1212 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && 1213 param->qp_type != IB_QPT_XRC_INI) 1214 return -EINVAL; 1215 1216 if (param->private_data && 1217 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) 1218 return -EINVAL; 1219 1220 if (param->alternate_path && 1221 (param->alternate_path->pkey != param->primary_path->pkey || 1222 param->alternate_path->mtu != param->primary_path->mtu)) 1223 return -EINVAL; 1224 1225 return 0; 1226 } 1227 1228 int ib_send_cm_req(struct ib_cm_id *cm_id, 1229 struct ib_cm_req_param *param) 1230 { 1231 struct cm_id_private *cm_id_priv; 1232 struct cm_req_msg *req_msg; 1233 unsigned long flags; 1234 int ret; 1235 1236 ret = cm_validate_req_param(param); 1237 if (ret) 1238 return ret; 1239 1240 /* Verify that we're not in timewait. 
*/ 1241 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1242 spin_lock_irqsave(&cm_id_priv->lock, flags); 1243 if (cm_id->state != IB_CM_IDLE) { 1244 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1245 ret = -EINVAL; 1246 goto out; 1247 } 1248 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1249 1250 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1251 id.local_id); 1252 if (IS_ERR(cm_id_priv->timewait_info)) { 1253 ret = PTR_ERR(cm_id_priv->timewait_info); 1254 goto out; 1255 } 1256 1257 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, 1258 cm_id_priv); 1259 if (ret) 1260 goto error1; 1261 if (param->alternate_path) { 1262 ret = cm_init_av_by_path(param->alternate_path, 1263 &cm_id_priv->alt_av, cm_id_priv); 1264 if (ret) 1265 goto error1; 1266 } 1267 cm_id->service_id = param->service_id; 1268 cm_id->service_mask = ~cpu_to_be64(0); 1269 cm_id_priv->timeout_ms = cm_convert_to_ms( 1270 param->primary_path->packet_life_time) * 2 + 1271 cm_convert_to_ms( 1272 param->remote_cm_response_timeout); 1273 cm_id_priv->max_cm_retries = param->max_cm_retries; 1274 cm_id_priv->initiator_depth = param->initiator_depth; 1275 cm_id_priv->responder_resources = param->responder_resources; 1276 cm_id_priv->retry_count = param->retry_count; 1277 cm_id_priv->path_mtu = param->primary_path->mtu; 1278 cm_id_priv->pkey = param->primary_path->pkey; 1279 cm_id_priv->qp_type = param->qp_type; 1280 1281 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); 1282 if (ret) 1283 goto error1; 1284 1285 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; 1286 cm_format_req(req_msg, cm_id_priv, param); 1287 cm_id_priv->tid = req_msg->hdr.tid; 1288 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; 1289 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; 1290 1291 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); 1292 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); 1293 1294 spin_lock_irqsave(&cm_id_priv->lock, flags); 1295 ret = ib_post_send_mad(cm_id_priv->msg, NULL); 1296 if (ret) { 1297 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1298 goto error2; 1299 } 1300 BUG_ON(cm_id->state != IB_CM_IDLE); 1301 cm_id->state = IB_CM_REQ_SENT; 1302 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1303 return 0; 1304 1305 error2: cm_free_msg(cm_id_priv->msg); 1306 error1: kfree(cm_id_priv->timewait_info); 1307 out: return ret; 1308 } 1309 EXPORT_SYMBOL(ib_send_cm_req); 1310 1311 static int cm_issue_rej(struct cm_port *port, 1312 struct ib_mad_recv_wc *mad_recv_wc, 1313 enum ib_cm_rej_reason reason, 1314 enum cm_msg_response msg_rejected, 1315 void *ari, u8 ari_length) 1316 { 1317 struct ib_mad_send_buf *msg = NULL; 1318 struct cm_rej_msg *rej_msg, *rcv_msg; 1319 int ret; 1320 1321 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 1322 if (ret) 1323 return ret; 1324 1325 /* We just need common CM header information. Cast to any message. 
*/ 1326 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; 1327 rej_msg = (struct cm_rej_msg *) msg->mad; 1328 1329 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); 1330 rej_msg->remote_comm_id = rcv_msg->local_comm_id; 1331 rej_msg->local_comm_id = rcv_msg->remote_comm_id; 1332 cm_rej_set_msg_rejected(rej_msg, msg_rejected); 1333 rej_msg->reason = cpu_to_be16(reason); 1334 1335 if (ari && ari_length) { 1336 cm_rej_set_reject_info_len(rej_msg, ari_length); 1337 memcpy(rej_msg->ari, ari, ari_length); 1338 } 1339 1340 ret = ib_post_send_mad(msg, NULL); 1341 if (ret) 1342 cm_free_msg(msg); 1343 1344 return ret; 1345 } 1346 1347 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, 1348 __be32 local_qpn, __be32 remote_qpn) 1349 { 1350 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || 1351 ((local_ca_guid == remote_ca_guid) && 1352 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); 1353 } 1354 1355 static void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1356 struct ib_sa_path_rec *primary_path, 1357 struct ib_sa_path_rec *alt_path) 1358 { 1359 memset(primary_path, 0, sizeof *primary_path); 1360 primary_path->dgid = req_msg->primary_local_gid; 1361 primary_path->sgid = req_msg->primary_remote_gid; 1362 primary_path->dlid = req_msg->primary_local_lid; 1363 primary_path->slid = req_msg->primary_remote_lid; 1364 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); 1365 primary_path->hop_limit = req_msg->primary_hop_limit; 1366 primary_path->traffic_class = req_msg->primary_traffic_class; 1367 primary_path->reversible = 1; 1368 primary_path->pkey = req_msg->pkey; 1369 primary_path->sl = cm_req_get_primary_sl(req_msg); 1370 primary_path->mtu_selector = IB_SA_EQ; 1371 primary_path->mtu = cm_req_get_path_mtu(req_msg); 1372 primary_path->rate_selector = IB_SA_EQ; 1373 primary_path->rate = cm_req_get_primary_packet_rate(req_msg); 1374 primary_path->packet_life_time_selector = IB_SA_EQ; 1375 primary_path->packet_life_time = 1376 cm_req_get_primary_local_ack_timeout(req_msg); 1377 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1378 primary_path->service_id = req_msg->service_id; 1379 1380 if (req_msg->alt_local_lid) { 1381 memset(alt_path, 0, sizeof *alt_path); 1382 alt_path->dgid = req_msg->alt_local_gid; 1383 alt_path->sgid = req_msg->alt_remote_gid; 1384 alt_path->dlid = req_msg->alt_local_lid; 1385 alt_path->slid = req_msg->alt_remote_lid; 1386 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); 1387 alt_path->hop_limit = req_msg->alt_hop_limit; 1388 alt_path->traffic_class = req_msg->alt_traffic_class; 1389 alt_path->reversible = 1; 1390 alt_path->pkey = req_msg->pkey; 1391 alt_path->sl = cm_req_get_alt_sl(req_msg); 1392 alt_path->mtu_selector = IB_SA_EQ; 1393 alt_path->mtu = cm_req_get_path_mtu(req_msg); 1394 alt_path->rate_selector = IB_SA_EQ; 1395 alt_path->rate = cm_req_get_alt_packet_rate(req_msg); 1396 alt_path->packet_life_time_selector = IB_SA_EQ; 1397 alt_path->packet_life_time = 1398 cm_req_get_alt_local_ack_timeout(req_msg); 1399 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1400 alt_path->service_id = req_msg->service_id; 1401 } 1402 } 1403 1404 static u16 cm_get_bth_pkey(struct cm_work *work) 1405 { 1406 struct ib_device *ib_dev = work->port->cm_dev->ib_device; 1407 u8 port_num = work->port->port_num; 1408 u16 pkey_index = work->mad_recv_wc->wc->pkey_index; 1409 u16 pkey; 1410 int ret; 1411 1412 ret = ib_get_cached_pkey(ib_dev, port_num, 
pkey_index, &pkey); 1413 if (ret) { 1414 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n", 1415 port_num, pkey_index, ret); 1416 return 0; 1417 } 1418 1419 return pkey; 1420 } 1421 1422 static void cm_format_req_event(struct cm_work *work, 1423 struct cm_id_private *cm_id_priv, 1424 struct ib_cm_id *listen_id) 1425 { 1426 struct cm_req_msg *req_msg; 1427 struct ib_cm_req_event_param *param; 1428 1429 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1430 param = &work->cm_event.param.req_rcvd; 1431 param->listen_id = listen_id; 1432 param->bth_pkey = cm_get_bth_pkey(work); 1433 param->port = cm_id_priv->av.port->port_num; 1434 param->primary_path = &work->path[0]; 1435 if (req_msg->alt_local_lid) 1436 param->alternate_path = &work->path[1]; 1437 else 1438 param->alternate_path = NULL; 1439 param->remote_ca_guid = req_msg->local_ca_guid; 1440 param->remote_qkey = be32_to_cpu(req_msg->local_qkey); 1441 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); 1442 param->qp_type = cm_req_get_qp_type(req_msg); 1443 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg)); 1444 param->responder_resources = cm_req_get_init_depth(req_msg); 1445 param->initiator_depth = cm_req_get_resp_res(req_msg); 1446 param->local_cm_response_timeout = 1447 cm_req_get_remote_resp_timeout(req_msg); 1448 param->flow_control = cm_req_get_flow_ctrl(req_msg); 1449 param->remote_cm_response_timeout = 1450 cm_req_get_local_resp_timeout(req_msg); 1451 param->retry_count = cm_req_get_retry_count(req_msg); 1452 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1453 param->srq = cm_req_get_srq(req_msg); 1454 work->cm_event.private_data = &req_msg->private_data; 1455 } 1456 1457 static void cm_process_work(struct cm_id_private *cm_id_priv, 1458 struct cm_work *work) 1459 { 1460 int ret; 1461 1462 /* We will typically only have the current event to report. 
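	 * Any later events that raced in while this one was being handled were
	 * queued on work_list and are drained by the loop below.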
*/ 1463 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); 1464 cm_free_work(work); 1465 1466 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { 1467 spin_lock_irq(&cm_id_priv->lock); 1468 work = cm_dequeue_work(cm_id_priv); 1469 spin_unlock_irq(&cm_id_priv->lock); 1470 BUG_ON(!work); 1471 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, 1472 &work->cm_event); 1473 cm_free_work(work); 1474 } 1475 cm_deref_id(cm_id_priv); 1476 if (ret) 1477 cm_destroy_id(&cm_id_priv->id, ret); 1478 } 1479 1480 static void cm_format_mra(struct cm_mra_msg *mra_msg, 1481 struct cm_id_private *cm_id_priv, 1482 enum cm_msg_response msg_mraed, u8 service_timeout, 1483 const void *private_data, u8 private_data_len) 1484 { 1485 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); 1486 cm_mra_set_msg_mraed(mra_msg, msg_mraed); 1487 mra_msg->local_comm_id = cm_id_priv->id.local_id; 1488 mra_msg->remote_comm_id = cm_id_priv->id.remote_id; 1489 cm_mra_set_service_timeout(mra_msg, service_timeout); 1490 1491 if (private_data && private_data_len) 1492 memcpy(mra_msg->private_data, private_data, private_data_len); 1493 } 1494 1495 static void cm_format_rej(struct cm_rej_msg *rej_msg, 1496 struct cm_id_private *cm_id_priv, 1497 enum ib_cm_rej_reason reason, 1498 void *ari, 1499 u8 ari_length, 1500 const void *private_data, 1501 u8 private_data_len) 1502 { 1503 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 1504 rej_msg->remote_comm_id = cm_id_priv->id.remote_id; 1505 1506 switch(cm_id_priv->id.state) { 1507 case IB_CM_REQ_RCVD: 1508 rej_msg->local_comm_id = 0; 1509 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1510 break; 1511 case IB_CM_MRA_REQ_SENT: 1512 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1513 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); 1514 break; 1515 case IB_CM_REP_RCVD: 1516 case IB_CM_MRA_REP_SENT: 1517 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1518 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP); 1519 break; 1520 default: 1521 rej_msg->local_comm_id = cm_id_priv->id.local_id; 1522 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER); 1523 break; 1524 } 1525 1526 rej_msg->reason = cpu_to_be16(reason); 1527 if (ari && ari_length) { 1528 cm_rej_set_reject_info_len(rej_msg, ari_length); 1529 memcpy(rej_msg->ari, ari, ari_length); 1530 } 1531 1532 if (private_data && private_data_len) 1533 memcpy(rej_msg->private_data, private_data, private_data_len); 1534 } 1535 1536 static void cm_dup_req_handler(struct cm_work *work, 1537 struct cm_id_private *cm_id_priv) 1538 { 1539 struct ib_mad_send_buf *msg = NULL; 1540 int ret; 1541 1542 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 1543 counter[CM_REQ_COUNTER]); 1544 1545 /* Quick state check to discard duplicate REQs. 
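	 * While the original REQ is still in IB_CM_REQ_RCVD it has not been
	 * answered yet, so the retransmit can simply be dropped.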
*/ 1546 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) 1547 return; 1548 1549 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1550 if (ret) 1551 return; 1552 1553 spin_lock_irq(&cm_id_priv->lock); 1554 switch (cm_id_priv->id.state) { 1555 case IB_CM_MRA_REQ_SENT: 1556 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1557 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, 1558 cm_id_priv->private_data, 1559 cm_id_priv->private_data_len); 1560 break; 1561 case IB_CM_TIMEWAIT: 1562 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, 1563 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); 1564 break; 1565 default: 1566 goto unlock; 1567 } 1568 spin_unlock_irq(&cm_id_priv->lock); 1569 1570 ret = ib_post_send_mad(msg, NULL); 1571 if (ret) 1572 goto free; 1573 return; 1574 1575 unlock: spin_unlock_irq(&cm_id_priv->lock); 1576 free: cm_free_msg(msg); 1577 } 1578 1579 static struct cm_id_private * cm_match_req(struct cm_work *work, 1580 struct cm_id_private *cm_id_priv) 1581 { 1582 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; 1583 struct cm_timewait_info *timewait_info; 1584 struct cm_req_msg *req_msg; 1585 1586 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1587 1588 /* Check for possible duplicate REQ. */ 1589 spin_lock_irq(&cm.lock); 1590 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); 1591 if (timewait_info) { 1592 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 1593 timewait_info->work.remote_id); 1594 spin_unlock_irq(&cm.lock); 1595 if (cur_cm_id_priv) { 1596 cm_dup_req_handler(work, cur_cm_id_priv); 1597 cm_deref_id(cur_cm_id_priv); 1598 } 1599 return NULL; 1600 } 1601 1602 /* Check for stale connections. */ 1603 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 1604 if (timewait_info) { 1605 cm_cleanup_timewait(cm_id_priv->timewait_info); 1606 spin_unlock_irq(&cm.lock); 1607 cm_issue_rej(work->port, work->mad_recv_wc, 1608 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, 1609 NULL, 0); 1610 return NULL; 1611 } 1612 1613 /* Find matching listen request. */ 1614 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, 1615 req_msg->service_id); 1616 if (!listen_cm_id_priv) { 1617 cm_cleanup_timewait(cm_id_priv->timewait_info); 1618 spin_unlock_irq(&cm.lock); 1619 cm_issue_rej(work->port, work->mad_recv_wc, 1620 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, 1621 NULL, 0); 1622 goto out; 1623 } 1624 atomic_inc(&listen_cm_id_priv->refcount); 1625 atomic_inc(&cm_id_priv->refcount); 1626 cm_id_priv->id.state = IB_CM_REQ_RCVD; 1627 atomic_inc(&cm_id_priv->work_count); 1628 spin_unlock_irq(&cm.lock); 1629 out: 1630 return listen_cm_id_priv; 1631 } 1632 1633 /* 1634 * Work-around for inter-subnet connections. If the LIDs are permissive, 1635 * we need to override the LID/SL data in the REQ with the LID information 1636 * in the work completion. 
1637 */ 1638 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) 1639 { 1640 if (!cm_req_get_primary_subnet_local(req_msg)) { 1641 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) { 1642 req_msg->primary_local_lid = cpu_to_be16(wc->slid); 1643 cm_req_set_primary_sl(req_msg, wc->sl); 1644 } 1645 1646 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE) 1647 req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits); 1648 } 1649 1650 if (!cm_req_get_alt_subnet_local(req_msg)) { 1651 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) { 1652 req_msg->alt_local_lid = cpu_to_be16(wc->slid); 1653 cm_req_set_alt_sl(req_msg, wc->sl); 1654 } 1655 1656 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE) 1657 req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits); 1658 } 1659 } 1660 1661 static int cm_req_handler(struct cm_work *work) 1662 { 1663 struct ib_cm_id *cm_id; 1664 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 1665 struct cm_req_msg *req_msg; 1666 union ib_gid gid; 1667 struct ib_gid_attr gid_attr; 1668 int ret; 1669 1670 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1671 1672 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); 1673 if (IS_ERR(cm_id)) 1674 return PTR_ERR(cm_id); 1675 1676 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1677 cm_id_priv->id.remote_id = req_msg->local_comm_id; 1678 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 1679 work->mad_recv_wc->recv_buf.grh, 1680 &cm_id_priv->av); 1681 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1682 id.local_id); 1683 if (IS_ERR(cm_id_priv->timewait_info)) { 1684 ret = PTR_ERR(cm_id_priv->timewait_info); 1685 goto destroy; 1686 } 1687 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; 1688 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; 1689 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg); 1690 1691 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 1692 if (!listen_cm_id_priv) { 1693 ret = -EINVAL; 1694 kfree(cm_id_priv->timewait_info); 1695 goto destroy; 1696 } 1697 1698 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1699 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1700 cm_id_priv->id.service_id = req_msg->service_id; 1701 cm_id_priv->id.service_mask = ~cpu_to_be64(0); 1702 1703 cm_process_routed_req(req_msg, work->mad_recv_wc->wc); 1704 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1705 1706 memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN); 1707 work->path[0].hop_limit = cm_id_priv->av.ah_attr.grh.hop_limit; 1708 ret = ib_get_cached_gid(work->port->cm_dev->ib_device, 1709 work->port->port_num, 1710 cm_id_priv->av.ah_attr.grh.sgid_index, 1711 &gid, &gid_attr); 1712 if (!ret) { 1713 if (gid_attr.ndev) { 1714 work->path[0].ifindex = gid_attr.ndev->ifindex; 1715 work->path[0].net = dev_net(gid_attr.ndev); 1716 dev_put(gid_attr.ndev); 1717 } 1718 work->path[0].gid_type = gid_attr.gid_type; 1719 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, 1720 cm_id_priv); 1721 } 1722 if (ret) { 1723 int err = ib_get_cached_gid(work->port->cm_dev->ib_device, 1724 work->port->port_num, 0, 1725 &work->path[0].sgid, 1726 &gid_attr); 1727 if (!err && gid_attr.ndev) { 1728 work->path[0].ifindex = gid_attr.ndev->ifindex; 1729 work->path[0].net = dev_net(gid_attr.ndev); 1730 dev_put(gid_attr.ndev); 1731 } 1732 work->path[0].gid_type = gid_attr.gid_type; 1733 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, 1734 
&work->path[0].sgid, sizeof work->path[0].sgid, 1735 NULL, 0); 1736 goto rejected; 1737 } 1738 if (req_msg->alt_local_lid) { 1739 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, 1740 cm_id_priv); 1741 if (ret) { 1742 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1743 &work->path[0].sgid, 1744 sizeof work->path[0].sgid, NULL, 0); 1745 goto rejected; 1746 } 1747 } 1748 cm_id_priv->tid = req_msg->hdr.tid; 1749 cm_id_priv->timeout_ms = cm_convert_to_ms( 1750 cm_req_get_local_resp_timeout(req_msg)); 1751 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg); 1752 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg); 1753 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg); 1754 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg); 1755 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg); 1756 cm_id_priv->pkey = req_msg->pkey; 1757 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg); 1758 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg); 1759 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1760 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); 1761 1762 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 1763 cm_process_work(cm_id_priv, work); 1764 cm_deref_id(listen_cm_id_priv); 1765 return 0; 1766 1767 rejected: 1768 atomic_dec(&cm_id_priv->refcount); 1769 cm_deref_id(listen_cm_id_priv); 1770 destroy: 1771 ib_destroy_cm_id(cm_id); 1772 return ret; 1773 } 1774 1775 static void cm_format_rep(struct cm_rep_msg *rep_msg, 1776 struct cm_id_private *cm_id_priv, 1777 struct ib_cm_rep_param *param) 1778 { 1779 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid); 1780 rep_msg->local_comm_id = cm_id_priv->id.local_id; 1781 rep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1782 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); 1783 rep_msg->resp_resources = param->responder_resources; 1784 cm_rep_set_target_ack_delay(rep_msg, 1785 cm_id_priv->av.port->cm_dev->ack_delay); 1786 cm_rep_set_failover(rep_msg, param->failover_accepted); 1787 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 1788 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid; 1789 1790 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { 1791 rep_msg->initiator_depth = param->initiator_depth; 1792 cm_rep_set_flow_ctrl(rep_msg, param->flow_control); 1793 cm_rep_set_srq(rep_msg, param->srq); 1794 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); 1795 } else { 1796 cm_rep_set_srq(rep_msg, 1); 1797 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num)); 1798 } 1799 1800 if (param->private_data && param->private_data_len) 1801 memcpy(rep_msg->private_data, param->private_data, 1802 param->private_data_len); 1803 } 1804 1805 int ib_send_cm_rep(struct ib_cm_id *cm_id, 1806 struct ib_cm_rep_param *param) 1807 { 1808 struct cm_id_private *cm_id_priv; 1809 struct ib_mad_send_buf *msg; 1810 struct cm_rep_msg *rep_msg; 1811 unsigned long flags; 1812 int ret; 1813 1814 if (param->private_data && 1815 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) 1816 return -EINVAL; 1817 1818 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1819 spin_lock_irqsave(&cm_id_priv->lock, flags); 1820 if (cm_id->state != IB_CM_REQ_RCVD && 1821 cm_id->state != IB_CM_MRA_REQ_SENT) { 1822 ret = -EINVAL; 1823 goto out; 1824 } 1825 1826 ret = cm_alloc_msg(cm_id_priv, &msg); 1827 if (ret) 1828 goto out; 1829 1830 rep_msg = (struct cm_rep_msg *) msg->mad; 1831 cm_format_rep(rep_msg, cm_id_priv, param); 1832 
msg->timeout_ms = cm_id_priv->timeout_ms; 1833 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; 1834 1835 ret = ib_post_send_mad(msg, NULL); 1836 if (ret) { 1837 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1838 cm_free_msg(msg); 1839 return ret; 1840 } 1841 1842 cm_id->state = IB_CM_REP_SENT; 1843 cm_id_priv->msg = msg; 1844 cm_id_priv->initiator_depth = param->initiator_depth; 1845 cm_id_priv->responder_resources = param->responder_resources; 1846 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); 1847 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); 1848 1849 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1850 return ret; 1851 } 1852 EXPORT_SYMBOL(ib_send_cm_rep); 1853 1854 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 1855 struct cm_id_private *cm_id_priv, 1856 const void *private_data, 1857 u8 private_data_len) 1858 { 1859 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 1860 rtu_msg->local_comm_id = cm_id_priv->id.local_id; 1861 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; 1862 1863 if (private_data && private_data_len) 1864 memcpy(rtu_msg->private_data, private_data, private_data_len); 1865 } 1866 1867 int ib_send_cm_rtu(struct ib_cm_id *cm_id, 1868 const void *private_data, 1869 u8 private_data_len) 1870 { 1871 struct cm_id_private *cm_id_priv; 1872 struct ib_mad_send_buf *msg; 1873 unsigned long flags; 1874 void *data; 1875 int ret; 1876 1877 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 1878 return -EINVAL; 1879 1880 data = cm_copy_private_data(private_data, private_data_len); 1881 if (IS_ERR(data)) 1882 return PTR_ERR(data); 1883 1884 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1885 spin_lock_irqsave(&cm_id_priv->lock, flags); 1886 if (cm_id->state != IB_CM_REP_RCVD && 1887 cm_id->state != IB_CM_MRA_REP_SENT) { 1888 ret = -EINVAL; 1889 goto error; 1890 } 1891 1892 ret = cm_alloc_msg(cm_id_priv, &msg); 1893 if (ret) 1894 goto error; 1895 1896 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1897 private_data, private_data_len); 1898 1899 ret = ib_post_send_mad(msg, NULL); 1900 if (ret) { 1901 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1902 cm_free_msg(msg); 1903 kfree(data); 1904 return ret; 1905 } 1906 1907 cm_id->state = IB_CM_ESTABLISHED; 1908 cm_set_private_data(cm_id_priv, data, private_data_len); 1909 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1910 return 0; 1911 1912 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1913 kfree(data); 1914 return ret; 1915 } 1916 EXPORT_SYMBOL(ib_send_cm_rtu); 1917 1918 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) 1919 { 1920 struct cm_rep_msg *rep_msg; 1921 struct ib_cm_rep_event_param *param; 1922 1923 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1924 param = &work->cm_event.param.rep_rcvd; 1925 param->remote_ca_guid = rep_msg->local_ca_guid; 1926 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); 1927 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); 1928 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); 1929 param->responder_resources = rep_msg->initiator_depth; 1930 param->initiator_depth = rep_msg->resp_resources; 1931 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 1932 param->failover_accepted = cm_rep_get_failover(rep_msg); 1933 param->flow_control = cm_rep_get_flow_ctrl(rep_msg); 1934 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1935 param->srq = 
cm_rep_get_srq(rep_msg); 1936 work->cm_event.private_data = &rep_msg->private_data; 1937 } 1938 1939 static void cm_dup_rep_handler(struct cm_work *work) 1940 { 1941 struct cm_id_private *cm_id_priv; 1942 struct cm_rep_msg *rep_msg; 1943 struct ib_mad_send_buf *msg = NULL; 1944 int ret; 1945 1946 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 1947 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 1948 rep_msg->local_comm_id); 1949 if (!cm_id_priv) 1950 return; 1951 1952 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 1953 counter[CM_REP_COUNTER]); 1954 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1955 if (ret) 1956 goto deref; 1957 1958 spin_lock_irq(&cm_id_priv->lock); 1959 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 1960 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1961 cm_id_priv->private_data, 1962 cm_id_priv->private_data_len); 1963 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 1964 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1965 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, 1966 cm_id_priv->private_data, 1967 cm_id_priv->private_data_len); 1968 else 1969 goto unlock; 1970 spin_unlock_irq(&cm_id_priv->lock); 1971 1972 ret = ib_post_send_mad(msg, NULL); 1973 if (ret) 1974 goto free; 1975 goto deref; 1976 1977 unlock: spin_unlock_irq(&cm_id_priv->lock); 1978 free: cm_free_msg(msg); 1979 deref: cm_deref_id(cm_id_priv); 1980 } 1981 1982 static int cm_rep_handler(struct cm_work *work) 1983 { 1984 struct cm_id_private *cm_id_priv; 1985 struct cm_rep_msg *rep_msg; 1986 int ret; 1987 1988 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1989 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); 1990 if (!cm_id_priv) { 1991 cm_dup_rep_handler(work); 1992 return -EINVAL; 1993 } 1994 1995 cm_format_rep_event(work, cm_id_priv->qp_type); 1996 1997 spin_lock_irq(&cm_id_priv->lock); 1998 switch (cm_id_priv->id.state) { 1999 case IB_CM_REQ_SENT: 2000 case IB_CM_MRA_REQ_RCVD: 2001 break; 2002 default: 2003 spin_unlock_irq(&cm_id_priv->lock); 2004 ret = -EINVAL; 2005 goto error; 2006 } 2007 2008 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; 2009 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; 2010 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2011 2012 spin_lock(&cm.lock); 2013 /* Check for duplicate REP. */ 2014 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 2015 spin_unlock(&cm.lock); 2016 spin_unlock_irq(&cm_id_priv->lock); 2017 ret = -EINVAL; 2018 goto error; 2019 } 2020 /* Check for a stale connection. 
*/ 2021 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { 2022 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 2023 &cm.remote_id_table); 2024 cm_id_priv->timewait_info->inserted_remote_id = 0; 2025 spin_unlock(&cm.lock); 2026 spin_unlock_irq(&cm_id_priv->lock); 2027 cm_issue_rej(work->port, work->mad_recv_wc, 2028 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 2029 NULL, 0); 2030 ret = -EINVAL; 2031 goto error; 2032 } 2033 spin_unlock(&cm.lock); 2034 2035 cm_id_priv->id.state = IB_CM_REP_RCVD; 2036 cm_id_priv->id.remote_id = rep_msg->local_comm_id; 2037 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2038 cm_id_priv->initiator_depth = rep_msg->resp_resources; 2039 cm_id_priv->responder_resources = rep_msg->initiator_depth; 2040 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); 2041 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 2042 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 2043 cm_id_priv->av.timeout = 2044 cm_ack_timeout(cm_id_priv->target_ack_delay, 2045 cm_id_priv->av.timeout - 1); 2046 cm_id_priv->alt_av.timeout = 2047 cm_ack_timeout(cm_id_priv->target_ack_delay, 2048 cm_id_priv->alt_av.timeout - 1); 2049 2050 /* todo: handle peer_to_peer */ 2051 2052 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2053 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2054 if (!ret) 2055 list_add_tail(&work->list, &cm_id_priv->work_list); 2056 spin_unlock_irq(&cm_id_priv->lock); 2057 2058 if (ret) 2059 cm_process_work(cm_id_priv, work); 2060 else 2061 cm_deref_id(cm_id_priv); 2062 return 0; 2063 2064 error: 2065 cm_deref_id(cm_id_priv); 2066 return ret; 2067 } 2068 2069 static int cm_establish_handler(struct cm_work *work) 2070 { 2071 struct cm_id_private *cm_id_priv; 2072 int ret; 2073 2074 /* See comment in cm_establish about lookup. */ 2075 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 2076 if (!cm_id_priv) 2077 return -EINVAL; 2078 2079 spin_lock_irq(&cm_id_priv->lock); 2080 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 2081 spin_unlock_irq(&cm_id_priv->lock); 2082 goto out; 2083 } 2084 2085 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2086 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2087 if (!ret) 2088 list_add_tail(&work->list, &cm_id_priv->work_list); 2089 spin_unlock_irq(&cm_id_priv->lock); 2090 2091 if (ret) 2092 cm_process_work(cm_id_priv, work); 2093 else 2094 cm_deref_id(cm_id_priv); 2095 return 0; 2096 out: 2097 cm_deref_id(cm_id_priv); 2098 return -EINVAL; 2099 } 2100 2101 static int cm_rtu_handler(struct cm_work *work) 2102 { 2103 struct cm_id_private *cm_id_priv; 2104 struct cm_rtu_msg *rtu_msg; 2105 int ret; 2106 2107 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 2108 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, 2109 rtu_msg->local_comm_id); 2110 if (!cm_id_priv) 2111 return -EINVAL; 2112 2113 work->cm_event.private_data = &rtu_msg->private_data; 2114 2115 spin_lock_irq(&cm_id_priv->lock); 2116 if (cm_id_priv->id.state != IB_CM_REP_SENT && 2117 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 2118 spin_unlock_irq(&cm_id_priv->lock); 2119 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
2120 counter[CM_RTU_COUNTER]); 2121 goto out; 2122 } 2123 cm_id_priv->id.state = IB_CM_ESTABLISHED; 2124 2125 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2126 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2127 if (!ret) 2128 list_add_tail(&work->list, &cm_id_priv->work_list); 2129 spin_unlock_irq(&cm_id_priv->lock); 2130 2131 if (ret) 2132 cm_process_work(cm_id_priv, work); 2133 else 2134 cm_deref_id(cm_id_priv); 2135 return 0; 2136 out: 2137 cm_deref_id(cm_id_priv); 2138 return -EINVAL; 2139 } 2140 2141 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 2142 struct cm_id_private *cm_id_priv, 2143 const void *private_data, 2144 u8 private_data_len) 2145 { 2146 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 2147 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); 2148 dreq_msg->local_comm_id = cm_id_priv->id.local_id; 2149 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; 2150 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); 2151 2152 if (private_data && private_data_len) 2153 memcpy(dreq_msg->private_data, private_data, private_data_len); 2154 } 2155 2156 int ib_send_cm_dreq(struct ib_cm_id *cm_id, 2157 const void *private_data, 2158 u8 private_data_len) 2159 { 2160 struct cm_id_private *cm_id_priv; 2161 struct ib_mad_send_buf *msg; 2162 unsigned long flags; 2163 int ret; 2164 2165 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 2166 return -EINVAL; 2167 2168 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2169 spin_lock_irqsave(&cm_id_priv->lock, flags); 2170 if (cm_id->state != IB_CM_ESTABLISHED) { 2171 ret = -EINVAL; 2172 goto out; 2173 } 2174 2175 if (cm_id->lap_state == IB_CM_LAP_SENT || 2176 cm_id->lap_state == IB_CM_MRA_LAP_RCVD) 2177 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2178 2179 ret = cm_alloc_msg(cm_id_priv, &msg); 2180 if (ret) { 2181 cm_enter_timewait(cm_id_priv); 2182 goto out; 2183 } 2184 2185 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 2186 private_data, private_data_len); 2187 msg->timeout_ms = cm_id_priv->timeout_ms; 2188 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 2189 2190 ret = ib_post_send_mad(msg, NULL); 2191 if (ret) { 2192 cm_enter_timewait(cm_id_priv); 2193 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2194 cm_free_msg(msg); 2195 return ret; 2196 } 2197 2198 cm_id->state = IB_CM_DREQ_SENT; 2199 cm_id_priv->msg = msg; 2200 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2201 return ret; 2202 } 2203 EXPORT_SYMBOL(ib_send_cm_dreq); 2204 2205 static void cm_format_drep(struct cm_drep_msg *drep_msg, 2206 struct cm_id_private *cm_id_priv, 2207 const void *private_data, 2208 u8 private_data_len) 2209 { 2210 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 2211 drep_msg->local_comm_id = cm_id_priv->id.local_id; 2212 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 2213 2214 if (private_data && private_data_len) 2215 memcpy(drep_msg->private_data, private_data, private_data_len); 2216 } 2217 2218 int ib_send_cm_drep(struct ib_cm_id *cm_id, 2219 const void *private_data, 2220 u8 private_data_len) 2221 { 2222 struct cm_id_private *cm_id_priv; 2223 struct ib_mad_send_buf *msg; 2224 unsigned long flags; 2225 void *data; 2226 int ret; 2227 2228 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 2229 return -EINVAL; 2230 2231 data = cm_copy_private_data(private_data, private_data_len); 2232 if (IS_ERR(data)) 2233 return PTR_ERR(data); 2234 2235 cm_id_priv = container_of(cm_id, struct 
cm_id_private, id); 2236 spin_lock_irqsave(&cm_id_priv->lock, flags); 2237 if (cm_id->state != IB_CM_DREQ_RCVD) { 2238 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2239 kfree(data); 2240 return -EINVAL; 2241 } 2242 2243 cm_set_private_data(cm_id_priv, data, private_data_len); 2244 cm_enter_timewait(cm_id_priv); 2245 2246 ret = cm_alloc_msg(cm_id_priv, &msg); 2247 if (ret) 2248 goto out; 2249 2250 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2251 private_data, private_data_len); 2252 2253 ret = ib_post_send_mad(msg, NULL); 2254 if (ret) { 2255 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2256 cm_free_msg(msg); 2257 return ret; 2258 } 2259 2260 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2261 return ret; 2262 } 2263 EXPORT_SYMBOL(ib_send_cm_drep); 2264 2265 static int cm_issue_drep(struct cm_port *port, 2266 struct ib_mad_recv_wc *mad_recv_wc) 2267 { 2268 struct ib_mad_send_buf *msg = NULL; 2269 struct cm_dreq_msg *dreq_msg; 2270 struct cm_drep_msg *drep_msg; 2271 int ret; 2272 2273 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 2274 if (ret) 2275 return ret; 2276 2277 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; 2278 drep_msg = (struct cm_drep_msg *) msg->mad; 2279 2280 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); 2281 drep_msg->remote_comm_id = dreq_msg->local_comm_id; 2282 drep_msg->local_comm_id = dreq_msg->remote_comm_id; 2283 2284 ret = ib_post_send_mad(msg, NULL); 2285 if (ret) 2286 cm_free_msg(msg); 2287 2288 return ret; 2289 } 2290 2291 static int cm_dreq_handler(struct cm_work *work) 2292 { 2293 struct cm_id_private *cm_id_priv; 2294 struct cm_dreq_msg *dreq_msg; 2295 struct ib_mad_send_buf *msg = NULL; 2296 int ret; 2297 2298 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 2299 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 2300 dreq_msg->local_comm_id); 2301 if (!cm_id_priv) { 2302 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2303 counter[CM_DREQ_COUNTER]); 2304 cm_issue_drep(work->port, work->mad_recv_wc); 2305 return -EINVAL; 2306 } 2307 2308 work->cm_event.private_data = &dreq_msg->private_data; 2309 2310 spin_lock_irq(&cm_id_priv->lock); 2311 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 2312 goto unlock; 2313 2314 switch (cm_id_priv->id.state) { 2315 case IB_CM_REP_SENT: 2316 case IB_CM_DREQ_SENT: 2317 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2318 break; 2319 case IB_CM_ESTABLISHED: 2320 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || 2321 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2322 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2323 break; 2324 case IB_CM_MRA_REP_RCVD: 2325 break; 2326 case IB_CM_TIMEWAIT: 2327 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2328 counter[CM_DREQ_COUNTER]); 2329 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2330 goto unlock; 2331 2332 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2333 cm_id_priv->private_data, 2334 cm_id_priv->private_data_len); 2335 spin_unlock_irq(&cm_id_priv->lock); 2336 2337 if (ib_post_send_mad(msg, NULL)) 2338 cm_free_msg(msg); 2339 goto deref; 2340 case IB_CM_DREQ_RCVD: 2341 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
2342 counter[CM_DREQ_COUNTER]); 2343 goto unlock; 2344 default: 2345 goto unlock; 2346 } 2347 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 2348 cm_id_priv->tid = dreq_msg->hdr.tid; 2349 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2350 if (!ret) 2351 list_add_tail(&work->list, &cm_id_priv->work_list); 2352 spin_unlock_irq(&cm_id_priv->lock); 2353 2354 if (ret) 2355 cm_process_work(cm_id_priv, work); 2356 else 2357 cm_deref_id(cm_id_priv); 2358 return 0; 2359 2360 unlock: spin_unlock_irq(&cm_id_priv->lock); 2361 deref: cm_deref_id(cm_id_priv); 2362 return -EINVAL; 2363 } 2364 2365 static int cm_drep_handler(struct cm_work *work) 2366 { 2367 struct cm_id_private *cm_id_priv; 2368 struct cm_drep_msg *drep_msg; 2369 int ret; 2370 2371 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 2372 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 2373 drep_msg->local_comm_id); 2374 if (!cm_id_priv) 2375 return -EINVAL; 2376 2377 work->cm_event.private_data = &drep_msg->private_data; 2378 2379 spin_lock_irq(&cm_id_priv->lock); 2380 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 2381 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2382 spin_unlock_irq(&cm_id_priv->lock); 2383 goto out; 2384 } 2385 cm_enter_timewait(cm_id_priv); 2386 2387 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2388 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2389 if (!ret) 2390 list_add_tail(&work->list, &cm_id_priv->work_list); 2391 spin_unlock_irq(&cm_id_priv->lock); 2392 2393 if (ret) 2394 cm_process_work(cm_id_priv, work); 2395 else 2396 cm_deref_id(cm_id_priv); 2397 return 0; 2398 out: 2399 cm_deref_id(cm_id_priv); 2400 return -EINVAL; 2401 } 2402 2403 int ib_send_cm_rej(struct ib_cm_id *cm_id, 2404 enum ib_cm_rej_reason reason, 2405 void *ari, 2406 u8 ari_length, 2407 const void *private_data, 2408 u8 private_data_len) 2409 { 2410 struct cm_id_private *cm_id_priv; 2411 struct ib_mad_send_buf *msg; 2412 unsigned long flags; 2413 int ret; 2414 2415 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2416 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2417 return -EINVAL; 2418 2419 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2420 2421 spin_lock_irqsave(&cm_id_priv->lock, flags); 2422 switch (cm_id->state) { 2423 case IB_CM_REQ_SENT: 2424 case IB_CM_MRA_REQ_RCVD: 2425 case IB_CM_REQ_RCVD: 2426 case IB_CM_MRA_REQ_SENT: 2427 case IB_CM_REP_RCVD: 2428 case IB_CM_MRA_REP_SENT: 2429 ret = cm_alloc_msg(cm_id_priv, &msg); 2430 if (!ret) 2431 cm_format_rej((struct cm_rej_msg *) msg->mad, 2432 cm_id_priv, reason, ari, ari_length, 2433 private_data, private_data_len); 2434 2435 cm_reset_to_idle(cm_id_priv); 2436 break; 2437 case IB_CM_REP_SENT: 2438 case IB_CM_MRA_REP_RCVD: 2439 ret = cm_alloc_msg(cm_id_priv, &msg); 2440 if (!ret) 2441 cm_format_rej((struct cm_rej_msg *) msg->mad, 2442 cm_id_priv, reason, ari, ari_length, 2443 private_data, private_data_len); 2444 2445 cm_enter_timewait(cm_id_priv); 2446 break; 2447 default: 2448 ret = -EINVAL; 2449 goto out; 2450 } 2451 2452 if (ret) 2453 goto out; 2454 2455 ret = ib_post_send_mad(msg, NULL); 2456 if (ret) 2457 cm_free_msg(msg); 2458 2459 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2460 return ret; 2461 } 2462 EXPORT_SYMBOL(ib_send_cm_rej); 2463 2464 static void cm_format_rej_event(struct cm_work *work) 2465 { 2466 struct cm_rej_msg *rej_msg; 2467 struct ib_cm_rej_event_param *param; 2468 2469 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2470 param = &work->cm_event.param.rej_rcvd; 
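	/*
	 * Pass the ARI, its length, and the reject reason through from the
	 * wire format; the private data pointer is filled in below.
	 */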
2471 param->ari = rej_msg->ari; 2472 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2473 param->reason = __be16_to_cpu(rej_msg->reason); 2474 work->cm_event.private_data = &rej_msg->private_data; 2475 } 2476 2477 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) 2478 { 2479 struct cm_timewait_info *timewait_info; 2480 struct cm_id_private *cm_id_priv; 2481 __be32 remote_id; 2482 2483 remote_id = rej_msg->local_comm_id; 2484 2485 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2486 spin_lock_irq(&cm.lock); 2487 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2488 remote_id); 2489 if (!timewait_info) { 2490 spin_unlock_irq(&cm.lock); 2491 return NULL; 2492 } 2493 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2494 (timewait_info->work.local_id ^ 2495 cm.random_id_operand)); 2496 if (cm_id_priv) { 2497 if (cm_id_priv->id.remote_id == remote_id) 2498 atomic_inc(&cm_id_priv->refcount); 2499 else 2500 cm_id_priv = NULL; 2501 } 2502 spin_unlock_irq(&cm.lock); 2503 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2504 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2505 else 2506 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2507 2508 return cm_id_priv; 2509 } 2510 2511 static int cm_rej_handler(struct cm_work *work) 2512 { 2513 struct cm_id_private *cm_id_priv; 2514 struct cm_rej_msg *rej_msg; 2515 int ret; 2516 2517 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2518 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2519 if (!cm_id_priv) 2520 return -EINVAL; 2521 2522 cm_format_rej_event(work); 2523 2524 spin_lock_irq(&cm_id_priv->lock); 2525 switch (cm_id_priv->id.state) { 2526 case IB_CM_REQ_SENT: 2527 case IB_CM_MRA_REQ_RCVD: 2528 case IB_CM_REP_SENT: 2529 case IB_CM_MRA_REP_RCVD: 2530 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2531 /* fall through */ 2532 case IB_CM_REQ_RCVD: 2533 case IB_CM_MRA_REQ_SENT: 2534 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2535 cm_enter_timewait(cm_id_priv); 2536 else 2537 cm_reset_to_idle(cm_id_priv); 2538 break; 2539 case IB_CM_DREQ_SENT: 2540 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2541 /* fall through */ 2542 case IB_CM_REP_RCVD: 2543 case IB_CM_MRA_REP_SENT: 2544 cm_enter_timewait(cm_id_priv); 2545 break; 2546 case IB_CM_ESTABLISHED: 2547 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || 2548 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { 2549 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) 2550 ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2551 cm_id_priv->msg); 2552 cm_enter_timewait(cm_id_priv); 2553 break; 2554 } 2555 /* fall through */ 2556 default: 2557 spin_unlock_irq(&cm_id_priv->lock); 2558 ret = -EINVAL; 2559 goto out; 2560 } 2561 2562 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2563 if (!ret) 2564 list_add_tail(&work->list, &cm_id_priv->work_list); 2565 spin_unlock_irq(&cm_id_priv->lock); 2566 2567 if (ret) 2568 cm_process_work(cm_id_priv, work); 2569 else 2570 cm_deref_id(cm_id_priv); 2571 return 0; 2572 out: 2573 cm_deref_id(cm_id_priv); 2574 return -EINVAL; 2575 } 2576 2577 int ib_send_cm_mra(struct ib_cm_id *cm_id, 2578 u8 service_timeout, 2579 const void *private_data, 2580 u8 private_data_len) 2581 { 2582 struct cm_id_private *cm_id_priv; 2583 struct ib_mad_send_buf *msg; 2584 enum ib_cm_state cm_state; 2585 enum ib_cm_lap_state lap_state; 2586 enum cm_msg_response msg_response; 2587 void *data; 2588 unsigned long flags; 2589 int ret; 2590 
2591 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2592 return -EINVAL; 2593 2594 data = cm_copy_private_data(private_data, private_data_len); 2595 if (IS_ERR(data)) 2596 return PTR_ERR(data); 2597 2598 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2599 2600 spin_lock_irqsave(&cm_id_priv->lock, flags); 2601 switch(cm_id_priv->id.state) { 2602 case IB_CM_REQ_RCVD: 2603 cm_state = IB_CM_MRA_REQ_SENT; 2604 lap_state = cm_id->lap_state; 2605 msg_response = CM_MSG_RESPONSE_REQ; 2606 break; 2607 case IB_CM_REP_RCVD: 2608 cm_state = IB_CM_MRA_REP_SENT; 2609 lap_state = cm_id->lap_state; 2610 msg_response = CM_MSG_RESPONSE_REP; 2611 break; 2612 case IB_CM_ESTABLISHED: 2613 if (cm_id->lap_state == IB_CM_LAP_RCVD) { 2614 cm_state = cm_id->state; 2615 lap_state = IB_CM_MRA_LAP_SENT; 2616 msg_response = CM_MSG_RESPONSE_OTHER; 2617 break; 2618 } 2619 default: 2620 ret = -EINVAL; 2621 goto error1; 2622 } 2623 2624 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { 2625 ret = cm_alloc_msg(cm_id_priv, &msg); 2626 if (ret) 2627 goto error1; 2628 2629 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2630 msg_response, service_timeout, 2631 private_data, private_data_len); 2632 ret = ib_post_send_mad(msg, NULL); 2633 if (ret) 2634 goto error2; 2635 } 2636 2637 cm_id->state = cm_state; 2638 cm_id->lap_state = lap_state; 2639 cm_id_priv->service_timeout = service_timeout; 2640 cm_set_private_data(cm_id_priv, data, private_data_len); 2641 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2642 return 0; 2643 2644 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2645 kfree(data); 2646 return ret; 2647 2648 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2649 kfree(data); 2650 cm_free_msg(msg); 2651 return ret; 2652 } 2653 EXPORT_SYMBOL(ib_send_cm_mra); 2654 2655 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2656 { 2657 switch (cm_mra_get_msg_mraed(mra_msg)) { 2658 case CM_MSG_RESPONSE_REQ: 2659 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2660 case CM_MSG_RESPONSE_REP: 2661 case CM_MSG_RESPONSE_OTHER: 2662 return cm_acquire_id(mra_msg->remote_comm_id, 2663 mra_msg->local_comm_id); 2664 default: 2665 return NULL; 2666 } 2667 } 2668 2669 static int cm_mra_handler(struct cm_work *work) 2670 { 2671 struct cm_id_private *cm_id_priv; 2672 struct cm_mra_msg *mra_msg; 2673 int timeout, ret; 2674 2675 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2676 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2677 if (!cm_id_priv) 2678 return -EINVAL; 2679 2680 work->cm_event.private_data = &mra_msg->private_data; 2681 work->cm_event.param.mra_rcvd.service_timeout = 2682 cm_mra_get_service_timeout(mra_msg); 2683 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2684 cm_convert_to_ms(cm_id_priv->av.timeout); 2685 2686 spin_lock_irq(&cm_id_priv->lock); 2687 switch (cm_id_priv->id.state) { 2688 case IB_CM_REQ_SENT: 2689 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2690 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2691 cm_id_priv->msg, timeout)) 2692 goto out; 2693 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2694 break; 2695 case IB_CM_REP_SENT: 2696 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2697 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2698 cm_id_priv->msg, timeout)) 2699 goto out; 2700 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2701 break; 2702 case IB_CM_ESTABLISHED: 2703 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2704 cm_id_priv->id.lap_state != 
IB_CM_LAP_SENT || 2705 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2706 cm_id_priv->msg, timeout)) { 2707 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2708 atomic_long_inc(&work->port-> 2709 counter_group[CM_RECV_DUPLICATES]. 2710 counter[CM_MRA_COUNTER]); 2711 goto out; 2712 } 2713 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2714 break; 2715 case IB_CM_MRA_REQ_RCVD: 2716 case IB_CM_MRA_REP_RCVD: 2717 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2718 counter[CM_MRA_COUNTER]); 2719 /* fall through */ 2720 default: 2721 goto out; 2722 } 2723 2724 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2725 cm_id_priv->id.state; 2726 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2727 if (!ret) 2728 list_add_tail(&work->list, &cm_id_priv->work_list); 2729 spin_unlock_irq(&cm_id_priv->lock); 2730 2731 if (ret) 2732 cm_process_work(cm_id_priv, work); 2733 else 2734 cm_deref_id(cm_id_priv); 2735 return 0; 2736 out: 2737 spin_unlock_irq(&cm_id_priv->lock); 2738 cm_deref_id(cm_id_priv); 2739 return -EINVAL; 2740 } 2741 2742 static void cm_format_lap(struct cm_lap_msg *lap_msg, 2743 struct cm_id_private *cm_id_priv, 2744 struct ib_sa_path_rec *alternate_path, 2745 const void *private_data, 2746 u8 private_data_len) 2747 { 2748 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2749 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2750 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2751 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2752 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2753 /* todo: need remote CM response timeout */ 2754 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2755 lap_msg->alt_local_lid = alternate_path->slid; 2756 lap_msg->alt_remote_lid = alternate_path->dlid; 2757 lap_msg->alt_local_gid = alternate_path->sgid; 2758 lap_msg->alt_remote_gid = alternate_path->dgid; 2759 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2760 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2761 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2762 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2763 cm_lap_set_sl(lap_msg, alternate_path->sl); 2764 cm_lap_set_subnet_local(lap_msg, 1); /* local only... 
*/ 2765 cm_lap_set_local_ack_timeout(lap_msg, 2766 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 2767 alternate_path->packet_life_time)); 2768 2769 if (private_data && private_data_len) 2770 memcpy(lap_msg->private_data, private_data, private_data_len); 2771 } 2772 2773 int ib_send_cm_lap(struct ib_cm_id *cm_id, 2774 struct ib_sa_path_rec *alternate_path, 2775 const void *private_data, 2776 u8 private_data_len) 2777 { 2778 struct cm_id_private *cm_id_priv; 2779 struct ib_mad_send_buf *msg; 2780 unsigned long flags; 2781 int ret; 2782 2783 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2784 return -EINVAL; 2785 2786 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2787 spin_lock_irqsave(&cm_id_priv->lock, flags); 2788 if (cm_id->state != IB_CM_ESTABLISHED || 2789 (cm_id->lap_state != IB_CM_LAP_UNINIT && 2790 cm_id->lap_state != IB_CM_LAP_IDLE)) { 2791 ret = -EINVAL; 2792 goto out; 2793 } 2794 2795 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, 2796 cm_id_priv); 2797 if (ret) 2798 goto out; 2799 cm_id_priv->alt_av.timeout = 2800 cm_ack_timeout(cm_id_priv->target_ack_delay, 2801 cm_id_priv->alt_av.timeout - 1); 2802 2803 ret = cm_alloc_msg(cm_id_priv, &msg); 2804 if (ret) 2805 goto out; 2806 2807 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2808 alternate_path, private_data, private_data_len); 2809 msg->timeout_ms = cm_id_priv->timeout_ms; 2810 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2811 2812 ret = ib_post_send_mad(msg, NULL); 2813 if (ret) { 2814 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2815 cm_free_msg(msg); 2816 return ret; 2817 } 2818 2819 cm_id->lap_state = IB_CM_LAP_SENT; 2820 cm_id_priv->msg = msg; 2821 2822 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2823 return ret; 2824 } 2825 EXPORT_SYMBOL(ib_send_cm_lap); 2826 2827 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, 2828 struct ib_sa_path_rec *path, 2829 struct cm_lap_msg *lap_msg) 2830 { 2831 memset(path, 0, sizeof *path); 2832 path->dgid = lap_msg->alt_local_gid; 2833 path->sgid = lap_msg->alt_remote_gid; 2834 path->dlid = lap_msg->alt_local_lid; 2835 path->slid = lap_msg->alt_remote_lid; 2836 path->flow_label = cm_lap_get_flow_label(lap_msg); 2837 path->hop_limit = lap_msg->alt_hop_limit; 2838 path->traffic_class = cm_lap_get_traffic_class(lap_msg); 2839 path->reversible = 1; 2840 path->pkey = cm_id_priv->pkey; 2841 path->sl = cm_lap_get_sl(lap_msg); 2842 path->mtu_selector = IB_SA_EQ; 2843 path->mtu = cm_id_priv->path_mtu; 2844 path->rate_selector = IB_SA_EQ; 2845 path->rate = cm_lap_get_packet_rate(lap_msg); 2846 path->packet_life_time_selector = IB_SA_EQ; 2847 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2848 path->packet_life_time -= (path->packet_life_time > 0); 2849 } 2850 2851 static int cm_lap_handler(struct cm_work *work) 2852 { 2853 struct cm_id_private *cm_id_priv; 2854 struct cm_lap_msg *lap_msg; 2855 struct ib_cm_lap_event_param *param; 2856 struct ib_mad_send_buf *msg = NULL; 2857 int ret; 2858 2859 /* todo: verify LAP request and send reject APR if invalid. 
*/ 2860 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2861 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2862 lap_msg->local_comm_id); 2863 if (!cm_id_priv) 2864 return -EINVAL; 2865 2866 param = &work->cm_event.param.lap_rcvd; 2867 param->alternate_path = &work->path[0]; 2868 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); 2869 work->cm_event.private_data = &lap_msg->private_data; 2870 2871 spin_lock_irq(&cm_id_priv->lock); 2872 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2873 goto unlock; 2874 2875 switch (cm_id_priv->id.lap_state) { 2876 case IB_CM_LAP_UNINIT: 2877 case IB_CM_LAP_IDLE: 2878 break; 2879 case IB_CM_MRA_LAP_SENT: 2880 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2881 counter[CM_LAP_COUNTER]); 2882 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2883 goto unlock; 2884 2885 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2886 CM_MSG_RESPONSE_OTHER, 2887 cm_id_priv->service_timeout, 2888 cm_id_priv->private_data, 2889 cm_id_priv->private_data_len); 2890 spin_unlock_irq(&cm_id_priv->lock); 2891 2892 if (ib_post_send_mad(msg, NULL)) 2893 cm_free_msg(msg); 2894 goto deref; 2895 case IB_CM_LAP_RCVD: 2896 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2897 counter[CM_LAP_COUNTER]); 2898 goto unlock; 2899 default: 2900 goto unlock; 2901 } 2902 2903 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2904 cm_id_priv->tid = lap_msg->hdr.tid; 2905 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2906 work->mad_recv_wc->recv_buf.grh, 2907 &cm_id_priv->av); 2908 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, 2909 cm_id_priv); 2910 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2911 if (!ret) 2912 list_add_tail(&work->list, &cm_id_priv->work_list); 2913 spin_unlock_irq(&cm_id_priv->lock); 2914 2915 if (ret) 2916 cm_process_work(cm_id_priv, work); 2917 else 2918 cm_deref_id(cm_id_priv); 2919 return 0; 2920 2921 unlock: spin_unlock_irq(&cm_id_priv->lock); 2922 deref: cm_deref_id(cm_id_priv); 2923 return -EINVAL; 2924 } 2925 2926 static void cm_format_apr(struct cm_apr_msg *apr_msg, 2927 struct cm_id_private *cm_id_priv, 2928 enum ib_cm_apr_status status, 2929 void *info, 2930 u8 info_length, 2931 const void *private_data, 2932 u8 private_data_len) 2933 { 2934 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2935 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2936 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2937 apr_msg->ap_status = (u8) status; 2938 2939 if (info && info_length) { 2940 apr_msg->info_length = info_length; 2941 memcpy(apr_msg->info, info, info_length); 2942 } 2943 2944 if (private_data && private_data_len) 2945 memcpy(apr_msg->private_data, private_data, private_data_len); 2946 } 2947 2948 int ib_send_cm_apr(struct ib_cm_id *cm_id, 2949 enum ib_cm_apr_status status, 2950 void *info, 2951 u8 info_length, 2952 const void *private_data, 2953 u8 private_data_len) 2954 { 2955 struct cm_id_private *cm_id_priv; 2956 struct ib_mad_send_buf *msg; 2957 unsigned long flags; 2958 int ret; 2959 2960 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2961 (info && info_length > IB_CM_APR_INFO_LENGTH)) 2962 return -EINVAL; 2963 2964 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2965 spin_lock_irqsave(&cm_id_priv->lock, flags); 2966 if (cm_id->state != IB_CM_ESTABLISHED || 2967 (cm_id->lap_state != IB_CM_LAP_RCVD && 2968 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2969 ret = -EINVAL; 2970 goto out; 
2971 } 2972 2973 ret = cm_alloc_msg(cm_id_priv, &msg); 2974 if (ret) 2975 goto out; 2976 2977 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2978 info, info_length, private_data, private_data_len); 2979 ret = ib_post_send_mad(msg, NULL); 2980 if (ret) { 2981 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2982 cm_free_msg(msg); 2983 return ret; 2984 } 2985 2986 cm_id->lap_state = IB_CM_LAP_IDLE; 2987 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2988 return ret; 2989 } 2990 EXPORT_SYMBOL(ib_send_cm_apr); 2991 2992 static int cm_apr_handler(struct cm_work *work) 2993 { 2994 struct cm_id_private *cm_id_priv; 2995 struct cm_apr_msg *apr_msg; 2996 int ret; 2997 2998 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2999 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 3000 apr_msg->local_comm_id); 3001 if (!cm_id_priv) 3002 return -EINVAL; /* Unmatched reply. */ 3003 3004 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 3005 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 3006 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 3007 work->cm_event.private_data = &apr_msg->private_data; 3008 3009 spin_lock_irq(&cm_id_priv->lock); 3010 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 3011 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 3012 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 3013 spin_unlock_irq(&cm_id_priv->lock); 3014 goto out; 3015 } 3016 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 3017 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 3018 cm_id_priv->msg = NULL; 3019 3020 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3021 if (!ret) 3022 list_add_tail(&work->list, &cm_id_priv->work_list); 3023 spin_unlock_irq(&cm_id_priv->lock); 3024 3025 if (ret) 3026 cm_process_work(cm_id_priv, work); 3027 else 3028 cm_deref_id(cm_id_priv); 3029 return 0; 3030 out: 3031 cm_deref_id(cm_id_priv); 3032 return -EINVAL; 3033 } 3034 3035 static int cm_timewait_handler(struct cm_work *work) 3036 { 3037 struct cm_timewait_info *timewait_info; 3038 struct cm_id_private *cm_id_priv; 3039 int ret; 3040 3041 timewait_info = (struct cm_timewait_info *)work; 3042 spin_lock_irq(&cm.lock); 3043 list_del(&timewait_info->list); 3044 spin_unlock_irq(&cm.lock); 3045 3046 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 3047 timewait_info->work.remote_id); 3048 if (!cm_id_priv) 3049 return -EINVAL; 3050 3051 spin_lock_irq(&cm_id_priv->lock); 3052 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 3053 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 3054 spin_unlock_irq(&cm_id_priv->lock); 3055 goto out; 3056 } 3057 cm_id_priv->id.state = IB_CM_IDLE; 3058 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3059 if (!ret) 3060 list_add_tail(&work->list, &cm_id_priv->work_list); 3061 spin_unlock_irq(&cm_id_priv->lock); 3062 3063 if (ret) 3064 cm_process_work(cm_id_priv, work); 3065 else 3066 cm_deref_id(cm_id_priv); 3067 return 0; 3068 out: 3069 cm_deref_id(cm_id_priv); 3070 return -EINVAL; 3071 } 3072 3073 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 3074 struct cm_id_private *cm_id_priv, 3075 struct ib_cm_sidr_req_param *param) 3076 { 3077 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 3078 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 3079 sidr_req_msg->request_id = cm_id_priv->id.local_id; 3080 sidr_req_msg->pkey = param->path->pkey; 3081 sidr_req_msg->service_id = param->service_id; 3082 3083 if (param->private_data && param->private_data_len) 3084 
memcpy(sidr_req_msg->private_data, param->private_data, 3085 param->private_data_len); 3086 } 3087 3088 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 3089 struct ib_cm_sidr_req_param *param) 3090 { 3091 struct cm_id_private *cm_id_priv; 3092 struct ib_mad_send_buf *msg; 3093 unsigned long flags; 3094 int ret; 3095 3096 if (!param->path || (param->private_data && 3097 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 3098 return -EINVAL; 3099 3100 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3101 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); 3102 if (ret) 3103 goto out; 3104 3105 cm_id->service_id = param->service_id; 3106 cm_id->service_mask = ~cpu_to_be64(0); 3107 cm_id_priv->timeout_ms = param->timeout_ms; 3108 cm_id_priv->max_cm_retries = param->max_cm_retries; 3109 ret = cm_alloc_msg(cm_id_priv, &msg); 3110 if (ret) 3111 goto out; 3112 3113 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 3114 param); 3115 msg->timeout_ms = cm_id_priv->timeout_ms; 3116 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 3117 3118 spin_lock_irqsave(&cm_id_priv->lock, flags); 3119 if (cm_id->state == IB_CM_IDLE) 3120 ret = ib_post_send_mad(msg, NULL); 3121 else 3122 ret = -EINVAL; 3123 3124 if (ret) { 3125 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3126 cm_free_msg(msg); 3127 goto out; 3128 } 3129 cm_id->state = IB_CM_SIDR_REQ_SENT; 3130 cm_id_priv->msg = msg; 3131 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3132 out: 3133 return ret; 3134 } 3135 EXPORT_SYMBOL(ib_send_cm_sidr_req); 3136 3137 static void cm_format_sidr_req_event(struct cm_work *work, 3138 struct ib_cm_id *listen_id) 3139 { 3140 struct cm_sidr_req_msg *sidr_req_msg; 3141 struct ib_cm_sidr_req_event_param *param; 3142 3143 sidr_req_msg = (struct cm_sidr_req_msg *) 3144 work->mad_recv_wc->recv_buf.mad; 3145 param = &work->cm_event.param.sidr_req_rcvd; 3146 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 3147 param->listen_id = listen_id; 3148 param->service_id = sidr_req_msg->service_id; 3149 param->bth_pkey = cm_get_bth_pkey(work); 3150 param->port = work->port->port_num; 3151 work->cm_event.private_data = &sidr_req_msg->private_data; 3152 } 3153 3154 static int cm_sidr_req_handler(struct cm_work *work) 3155 { 3156 struct ib_cm_id *cm_id; 3157 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 3158 struct cm_sidr_req_msg *sidr_req_msg; 3159 struct ib_wc *wc; 3160 3161 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); 3162 if (IS_ERR(cm_id)) 3163 return PTR_ERR(cm_id); 3164 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3165 3166 /* Record SGID/SLID and request ID for lookup. */ 3167 sidr_req_msg = (struct cm_sidr_req_msg *) 3168 work->mad_recv_wc->recv_buf.mad; 3169 wc = work->mad_recv_wc->wc; 3170 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 3171 cm_id_priv->av.dgid.global.interface_id = 0; 3172 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 3173 work->mad_recv_wc->recv_buf.grh, 3174 &cm_id_priv->av); 3175 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 3176 cm_id_priv->tid = sidr_req_msg->hdr.tid; 3177 atomic_inc(&cm_id_priv->work_count); 3178 3179 spin_lock_irq(&cm.lock); 3180 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 3181 if (cur_cm_id_priv) { 3182 spin_unlock_irq(&cm.lock); 3183 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 3184 counter[CM_SIDR_REQ_COUNTER]); 3185 goto out; /* Duplicate message. 
*/ 3186 } 3187 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 3188 cur_cm_id_priv = cm_find_listen(cm_id->device, 3189 sidr_req_msg->service_id); 3190 if (!cur_cm_id_priv) { 3191 spin_unlock_irq(&cm.lock); 3192 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); 3193 goto out; /* No match. */ 3194 } 3195 atomic_inc(&cur_cm_id_priv->refcount); 3196 atomic_inc(&cm_id_priv->refcount); 3197 spin_unlock_irq(&cm.lock); 3198 3199 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 3200 cm_id_priv->id.context = cur_cm_id_priv->id.context; 3201 cm_id_priv->id.service_id = sidr_req_msg->service_id; 3202 cm_id_priv->id.service_mask = ~cpu_to_be64(0); 3203 3204 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 3205 cm_process_work(cm_id_priv, work); 3206 cm_deref_id(cur_cm_id_priv); 3207 return 0; 3208 out: 3209 ib_destroy_cm_id(&cm_id_priv->id); 3210 return -EINVAL; 3211 } 3212 3213 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 3214 struct cm_id_private *cm_id_priv, 3215 struct ib_cm_sidr_rep_param *param) 3216 { 3217 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 3218 cm_id_priv->tid); 3219 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 3220 sidr_rep_msg->status = param->status; 3221 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 3222 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 3223 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 3224 3225 if (param->info && param->info_length) 3226 memcpy(sidr_rep_msg->info, param->info, param->info_length); 3227 3228 if (param->private_data && param->private_data_len) 3229 memcpy(sidr_rep_msg->private_data, param->private_data, 3230 param->private_data_len); 3231 } 3232 3233 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 3234 struct ib_cm_sidr_rep_param *param) 3235 { 3236 struct cm_id_private *cm_id_priv; 3237 struct ib_mad_send_buf *msg; 3238 unsigned long flags; 3239 int ret; 3240 3241 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 3242 (param->private_data && 3243 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 3244 return -EINVAL; 3245 3246 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3247 spin_lock_irqsave(&cm_id_priv->lock, flags); 3248 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 3249 ret = -EINVAL; 3250 goto error; 3251 } 3252 3253 ret = cm_alloc_msg(cm_id_priv, &msg); 3254 if (ret) 3255 goto error; 3256 3257 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 3258 param); 3259 ret = ib_post_send_mad(msg, NULL); 3260 if (ret) { 3261 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3262 cm_free_msg(msg); 3263 return ret; 3264 } 3265 cm_id->state = IB_CM_IDLE; 3266 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3267 3268 spin_lock_irqsave(&cm.lock, flags); 3269 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3270 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3271 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3272 } 3273 spin_unlock_irqrestore(&cm.lock, flags); 3274 return 0; 3275 3276 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3277 return ret; 3278 } 3279 EXPORT_SYMBOL(ib_send_cm_sidr_rep); 3280 3281 static void cm_format_sidr_rep_event(struct cm_work *work) 3282 { 3283 struct cm_sidr_rep_msg *sidr_rep_msg; 3284 struct ib_cm_sidr_rep_event_param *param; 3285 3286 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3287 work->mad_recv_wc->recv_buf.mad; 3288 param = &work->cm_event.param.sidr_rep_rcvd; 3289 param->status = sidr_rep_msg->status; 3290 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 3291 
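	/*
	 * The QPN, optional additional info, and private data are copied
	 * straight from the SIDR REP payload below.
	 */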
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 3292 param->info = &sidr_rep_msg->info; 3293 param->info_len = sidr_rep_msg->info_length; 3294 work->cm_event.private_data = &sidr_rep_msg->private_data; 3295 } 3296 3297 static int cm_sidr_rep_handler(struct cm_work *work) 3298 { 3299 struct cm_sidr_rep_msg *sidr_rep_msg; 3300 struct cm_id_private *cm_id_priv; 3301 3302 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3303 work->mad_recv_wc->recv_buf.mad; 3304 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 3305 if (!cm_id_priv) 3306 return -EINVAL; /* Unmatched reply. */ 3307 3308 spin_lock_irq(&cm_id_priv->lock); 3309 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 3310 spin_unlock_irq(&cm_id_priv->lock); 3311 goto out; 3312 } 3313 cm_id_priv->id.state = IB_CM_IDLE; 3314 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 3315 spin_unlock_irq(&cm_id_priv->lock); 3316 3317 cm_format_sidr_rep_event(work); 3318 cm_process_work(cm_id_priv, work); 3319 return 0; 3320 out: 3321 cm_deref_id(cm_id_priv); 3322 return -EINVAL; 3323 } 3324 3325 static void cm_process_send_error(struct ib_mad_send_buf *msg, 3326 enum ib_wc_status wc_status) 3327 { 3328 struct cm_id_private *cm_id_priv; 3329 struct ib_cm_event cm_event; 3330 enum ib_cm_state state; 3331 int ret; 3332 3333 memset(&cm_event, 0, sizeof cm_event); 3334 cm_id_priv = msg->context[0]; 3335 3336 /* Discard old sends or ones without a response. */ 3337 spin_lock_irq(&cm_id_priv->lock); 3338 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 3339 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 3340 goto discard; 3341 3342 switch (state) { 3343 case IB_CM_REQ_SENT: 3344 case IB_CM_MRA_REQ_RCVD: 3345 cm_reset_to_idle(cm_id_priv); 3346 cm_event.event = IB_CM_REQ_ERROR; 3347 break; 3348 case IB_CM_REP_SENT: 3349 case IB_CM_MRA_REP_RCVD: 3350 cm_reset_to_idle(cm_id_priv); 3351 cm_event.event = IB_CM_REP_ERROR; 3352 break; 3353 case IB_CM_DREQ_SENT: 3354 cm_enter_timewait(cm_id_priv); 3355 cm_event.event = IB_CM_DREQ_ERROR; 3356 break; 3357 case IB_CM_SIDR_REQ_SENT: 3358 cm_id_priv->id.state = IB_CM_IDLE; 3359 cm_event.event = IB_CM_SIDR_REQ_ERROR; 3360 break; 3361 default: 3362 goto discard; 3363 } 3364 spin_unlock_irq(&cm_id_priv->lock); 3365 cm_event.param.send_status = wc_status; 3366 3367 /* No other events can occur on the cm_id at this point. */ 3368 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 3369 cm_free_msg(msg); 3370 if (ret) 3371 ib_destroy_cm_id(&cm_id_priv->id); 3372 return; 3373 discard: 3374 spin_unlock_irq(&cm_id_priv->lock); 3375 cm_free_msg(msg); 3376 } 3377 3378 static void cm_send_handler(struct ib_mad_agent *mad_agent, 3379 struct ib_mad_send_wc *mad_send_wc) 3380 { 3381 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 3382 struct cm_port *port; 3383 u16 attr_index; 3384 3385 port = mad_agent->context; 3386 attr_index = be16_to_cpu(((struct ib_mad_hdr *) 3387 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; 3388 3389 /* 3390 * If the send was in response to a received message (context[0] is not 3391 * set to a cm_id), and is not a REJ, then it is a send that was 3392 * manually retried. 3393 */ 3394 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) 3395 msg->retries = 1; 3396 3397 atomic_long_add(1 + msg->retries, 3398 &port->counter_group[CM_XMIT].counter[attr_index]); 3399 if (msg->retries) 3400 atomic_long_add(msg->retries, 3401 &port->counter_group[CM_XMIT_RETRIES]. 
3402 counter[attr_index]); 3403 3404 switch (mad_send_wc->status) { 3405 case IB_WC_SUCCESS: 3406 case IB_WC_WR_FLUSH_ERR: 3407 cm_free_msg(msg); 3408 break; 3409 default: 3410 if (msg->context[0] && msg->context[1]) 3411 cm_process_send_error(msg, mad_send_wc->status); 3412 else 3413 cm_free_msg(msg); 3414 break; 3415 } 3416 } 3417 3418 static void cm_work_handler(struct work_struct *_work) 3419 { 3420 struct cm_work *work = container_of(_work, struct cm_work, work.work); 3421 int ret; 3422 3423 switch (work->cm_event.event) { 3424 case IB_CM_REQ_RECEIVED: 3425 ret = cm_req_handler(work); 3426 break; 3427 case IB_CM_MRA_RECEIVED: 3428 ret = cm_mra_handler(work); 3429 break; 3430 case IB_CM_REJ_RECEIVED: 3431 ret = cm_rej_handler(work); 3432 break; 3433 case IB_CM_REP_RECEIVED: 3434 ret = cm_rep_handler(work); 3435 break; 3436 case IB_CM_RTU_RECEIVED: 3437 ret = cm_rtu_handler(work); 3438 break; 3439 case IB_CM_USER_ESTABLISHED: 3440 ret = cm_establish_handler(work); 3441 break; 3442 case IB_CM_DREQ_RECEIVED: 3443 ret = cm_dreq_handler(work); 3444 break; 3445 case IB_CM_DREP_RECEIVED: 3446 ret = cm_drep_handler(work); 3447 break; 3448 case IB_CM_SIDR_REQ_RECEIVED: 3449 ret = cm_sidr_req_handler(work); 3450 break; 3451 case IB_CM_SIDR_REP_RECEIVED: 3452 ret = cm_sidr_rep_handler(work); 3453 break; 3454 case IB_CM_LAP_RECEIVED: 3455 ret = cm_lap_handler(work); 3456 break; 3457 case IB_CM_APR_RECEIVED: 3458 ret = cm_apr_handler(work); 3459 break; 3460 case IB_CM_TIMEWAIT_EXIT: 3461 ret = cm_timewait_handler(work); 3462 break; 3463 default: 3464 ret = -EINVAL; 3465 break; 3466 } 3467 if (ret) 3468 cm_free_work(work); 3469 } 3470 3471 static int cm_establish(struct ib_cm_id *cm_id) 3472 { 3473 struct cm_id_private *cm_id_priv; 3474 struct cm_work *work; 3475 unsigned long flags; 3476 int ret = 0; 3477 struct cm_device *cm_dev; 3478 3479 cm_dev = ib_get_client_data(cm_id->device, &cm_client); 3480 if (!cm_dev) 3481 return -ENODEV; 3482 3483 work = kmalloc(sizeof *work, GFP_ATOMIC); 3484 if (!work) 3485 return -ENOMEM; 3486 3487 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3488 spin_lock_irqsave(&cm_id_priv->lock, flags); 3489 switch (cm_id->state) 3490 { 3491 case IB_CM_REP_SENT: 3492 case IB_CM_MRA_REP_RCVD: 3493 cm_id->state = IB_CM_ESTABLISHED; 3494 break; 3495 case IB_CM_ESTABLISHED: 3496 ret = -EISCONN; 3497 break; 3498 default: 3499 ret = -EINVAL; 3500 break; 3501 } 3502 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3503 3504 if (ret) { 3505 kfree(work); 3506 goto out; 3507 } 3508 3509 /* 3510 * The CM worker thread may try to destroy the cm_id before it 3511 * can execute this work item. To prevent potential deadlock, 3512 * we need to find the cm_id once we're in the context of the 3513 * worker thread, rather than holding a reference on it. 
3514 */ 3515 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3516 work->local_id = cm_id->local_id; 3517 work->remote_id = cm_id->remote_id; 3518 work->mad_recv_wc = NULL; 3519 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3520 3521 /* Check if the device started its remove_one */ 3522 spin_lock_irqsave(&cm.lock, flags); 3523 if (!cm_dev->going_down) { 3524 queue_delayed_work(cm.wq, &work->work, 0); 3525 } else { 3526 kfree(work); 3527 ret = -ENODEV; 3528 } 3529 spin_unlock_irqrestore(&cm.lock, flags); 3530 3531 out: 3532 return ret; 3533 } 3534 3535 static int cm_migrate(struct ib_cm_id *cm_id) 3536 { 3537 struct cm_id_private *cm_id_priv; 3538 struct cm_av tmp_av; 3539 unsigned long flags; 3540 int tmp_send_port_not_ready; 3541 int ret = 0; 3542 3543 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3544 spin_lock_irqsave(&cm_id_priv->lock, flags); 3545 if (cm_id->state == IB_CM_ESTABLISHED && 3546 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3547 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3548 cm_id->lap_state = IB_CM_LAP_IDLE; 3549 /* Swap address vector */ 3550 tmp_av = cm_id_priv->av; 3551 cm_id_priv->av = cm_id_priv->alt_av; 3552 cm_id_priv->alt_av = tmp_av; 3553 /* Swap port send ready state */ 3554 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; 3555 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; 3556 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; 3557 } else 3558 ret = -EINVAL; 3559 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3560 3561 return ret; 3562 } 3563 3564 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) 3565 { 3566 int ret; 3567 3568 switch (event) { 3569 case IB_EVENT_COMM_EST: 3570 ret = cm_establish(cm_id); 3571 break; 3572 case IB_EVENT_PATH_MIG: 3573 ret = cm_migrate(cm_id); 3574 break; 3575 default: 3576 ret = -EINVAL; 3577 } 3578 return ret; 3579 } 3580 EXPORT_SYMBOL(ib_cm_notify); 3581 3582 static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3583 struct ib_mad_send_buf *send_buf, 3584 struct ib_mad_recv_wc *mad_recv_wc) 3585 { 3586 struct cm_port *port = mad_agent->context; 3587 struct cm_work *work; 3588 enum ib_cm_event_type event; 3589 u16 attr_id; 3590 int paths = 0; 3591 int going_down = 0; 3592 3593 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3594 case CM_REQ_ATTR_ID: 3595 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 3596 alt_local_lid != 0); 3597 event = IB_CM_REQ_RECEIVED; 3598 break; 3599 case CM_MRA_ATTR_ID: 3600 event = IB_CM_MRA_RECEIVED; 3601 break; 3602 case CM_REJ_ATTR_ID: 3603 event = IB_CM_REJ_RECEIVED; 3604 break; 3605 case CM_REP_ATTR_ID: 3606 event = IB_CM_REP_RECEIVED; 3607 break; 3608 case CM_RTU_ATTR_ID: 3609 event = IB_CM_RTU_RECEIVED; 3610 break; 3611 case CM_DREQ_ATTR_ID: 3612 event = IB_CM_DREQ_RECEIVED; 3613 break; 3614 case CM_DREP_ATTR_ID: 3615 event = IB_CM_DREP_RECEIVED; 3616 break; 3617 case CM_SIDR_REQ_ATTR_ID: 3618 event = IB_CM_SIDR_REQ_RECEIVED; 3619 break; 3620 case CM_SIDR_REP_ATTR_ID: 3621 event = IB_CM_SIDR_REP_RECEIVED; 3622 break; 3623 case CM_LAP_ATTR_ID: 3624 paths = 1; 3625 event = IB_CM_LAP_RECEIVED; 3626 break; 3627 case CM_APR_ATTR_ID: 3628 event = IB_CM_APR_RECEIVED; 3629 break; 3630 default: 3631 ib_free_recv_mad(mad_recv_wc); 3632 return; 3633 } 3634 3635 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); 3636 atomic_long_inc(&port->counter_group[CM_RECV]. 
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
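	/*
	 * Two cases below: on the first transition to RTS (lap_state is
	 * still IB_CM_LAP_UNINIT) the SQ PSN, retry counts and ack timeout
	 * negotiated during connection setup are programmed; once an
	 * alternate path has been loaded via LAP, only the alternate path
	 * attributes are set and the QP is rearmed with IB_MIG_REARM.
	 */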
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				/* fall through */
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
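
/*
 * Usage sketch (illustrative, not part of this file): consumers drive a
 * connection's QP through INIT/RTR/RTS by asking the CM to fill in the
 * attributes for each transition and then applying them with
 * ib_modify_qp().  The wrapper name below, "example_cm_modify_qp", is a
 * hypothetical name chosen only for this example.
 *
 *	static int example_cm_modify_qp(struct ib_cm_id *cm_id,
 *					struct ib_qp *qp,
 *					enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask;
 *		int ret;
 *
 *		qp_attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *	}
 *
 * A passive side typically calls this with IB_QPS_INIT and IB_QPS_RTR
 * before sending a REP, and with IB_QPS_RTS once the connection is
 * established; the active side moves to RTR/RTS after the REP arrives.
 */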

static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
				   &port->cm_dev->device->kobj,
				   "%d", port->port_num);
	if (ret) {
		kfree(port);
		return ret;
	}

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = kobject_init_and_add(&port->counter_group[i].obj,
					   &cm_counter_obj_type,
					   &port->port_obj,
					   "%s", counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		kobject_put(&port->counter_group[i].obj);

	kobject_put(&port->port_obj);
}

static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;
	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (IS_ERR(cm_dev->device)) {
		kfree(cm_dev);
		return;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
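
/*
 * For reference, the sysfs layout built by device_create() and
 * cm_create_port_fs() above looks roughly as follows.  The device name
 * "mlx4_0" and the single port "1" are assumptions made for this example:
 *
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_tx_msgs/{req,rep,rtu,...}
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_tx_retries/{req,rep,rtu,...}
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_rx_msgs/{req,rep,rtu,...}
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_rx_duplicates/{req,rep,rtu,...}
 *
 * Each attribute file is read-only and reports the corresponding
 * per-port counter via cm_show_counter().
 */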
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the work queue only after going_down has been set.
		 * This guarantees that the receive handler queues no new
		 * work, after which it is safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	device_unregister(cm_dev->device);
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
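
/*
 * Illustrative sketch only, not part of this module: a minimal kernel
 * consumer of the listen API provided by ib_cm.  The handler name,
 * wrapper name and service ID below ("example_cm_handler",
 * "example_listen", 0x1000) are hypothetical and chosen only for this
 * example; a real consumer would answer the REQ with ib_send_cm_rep()
 * or ib_send_cm_rej() and manage QP state with ib_cm_init_qp_attr().
 *
 *	static int example_cm_handler(struct ib_cm_id *cm_id,
 *				      struct ib_cm_event *event)
 *	{
 *		if (event->event == IB_CM_REQ_RECEIVED)
 *			pr_info("ib_cm: connection request received\n");
 *		return 0;
 *	}
 *
 *	static int example_listen(struct ib_device *device)
 *	{
 *		struct ib_cm_id *cm_id;
 *
 *		cm_id = ib_create_cm_id(device, example_cm_handler, NULL);
 *		if (IS_ERR(cm_id))
 *			return PTR_ERR(cm_id);
 *		return ib_cm_listen(cm_id, cpu_to_be64(0x1000), 0);
 *	}
 *
 * The listening ID is released with ib_destroy_cm_id() when the consumer
 * shuts down.
 */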