/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;	/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port MAD agent is registered and the av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the MAD agent yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}
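
/*
 * Note: cm_free_msg() below undoes this setup -- it destroys the address
 * handle attached to the send buffer and drops the cm_id_priv reference
 * taken here before freeing the MAD.
 */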

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				  struct cm_av *av,
				  struct cm_port *port)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm.lock, flags);

	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;

	spin_unlock_irqrestore(&cm.lock, flags);
	return ret;
}

static struct cm_port *get_cm_port_from_path(struct sa_path_rec *path)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					sa_conv_pathrec_to_gid_type(path),
					ndev, &p, NULL)) {
			port = cm_dev->port[p - 1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&av->ah_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;

	ret = add_cm_id_to_port_list(cm_id_priv, av, port);
	return ret;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
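
/*
 * Note: the comm ID handed out on the wire is the IDR index XORed with
 * cm.random_id_operand (see cm_alloc_id() and cm_get_id() above), which
 * decouples the on-wire ID from the IDR index while keeping lookups a
 * plain IDR find.
 */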

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
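
/*
 * The remote_id and remote_qp RB trees populated here are keyed on
 * (remote comm ID, remote CA GUID) and (remote QPN, remote CA GUID)
 * respectively; cm_match_req() uses them to detect duplicate REQs and
 * stale connections from the same peer.
 */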

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
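
/*
 * Illustrative usage only (my_handler, my_ctx and MY_SERVICE_ID are
 * placeholders, not part of this file): a ULP typically sets up a listener
 * with
 *
 *	id = ib_create_cm_id(device, my_handler, my_ctx);
 *	if (!IS_ERR(id))
 *		ret = ib_cm_listen(id, cpu_to_be64(MY_SERVICE_ID), 0);
 *
 * and tears it down again with ib_destroy_cm_id(id).
 */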

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	if (pri_ext) {
		req_msg->primary_local_gid.global.interface_id
			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		req_msg->primary_remote_gid.global.interface_id
			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_ext ? 0 :
			htons(ntohl(sa_path_get_slid(pri_path)));
		req_msg->primary_remote_lid = pri_ext ? 0 :
			htons(ntohl(sa_path_get_dlid(pri_path)));
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		if (alt_ext) {
			req_msg->alt_local_gid.global.interface_id
				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			req_msg->alt_remote_gid.global.interface_id
				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_ext ? 0 :
				htons(ntohl(sa_path_get_slid(alt_path)));
			req_msg->alt_remote_lid = alt_ext ? 0 :
				htons(ntohl(sa_path_get_dlid(alt_path)));
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((req_msg->alt_local_lid) ||
		(ib_is_opa_gid(&req_msg->alt_local_gid)));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path,
				 ntohs(req_msg->primary_local_lid));
		sa_path_set_slid(primary_path,
				 ntohs(req_msg->primary_remote_lid));
	} else {
		lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
		sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
	} else {
		lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

/**
 * Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	struct ib_cm_id *cm_id;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			cm_id = &cur_cm_id_priv->id;
			ib_send_cm_dreq(cm_id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
1848 */ 1849 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) 1850 { 1851 if (!cm_req_get_primary_subnet_local(req_msg)) { 1852 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) { 1853 req_msg->primary_local_lid = ib_lid_be16(wc->slid); 1854 cm_req_set_primary_sl(req_msg, wc->sl); 1855 } 1856 1857 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE) 1858 req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits); 1859 } 1860 1861 if (!cm_req_get_alt_subnet_local(req_msg)) { 1862 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) { 1863 req_msg->alt_local_lid = ib_lid_be16(wc->slid); 1864 cm_req_set_alt_sl(req_msg, wc->sl); 1865 } 1866 1867 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE) 1868 req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits); 1869 } 1870 } 1871 1872 static int cm_req_handler(struct cm_work *work) 1873 { 1874 struct ib_cm_id *cm_id; 1875 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 1876 struct cm_req_msg *req_msg; 1877 union ib_gid gid; 1878 struct ib_gid_attr gid_attr; 1879 const struct ib_global_route *grh; 1880 int ret; 1881 1882 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1883 1884 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); 1885 if (IS_ERR(cm_id)) 1886 return PTR_ERR(cm_id); 1887 1888 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1889 cm_id_priv->id.remote_id = req_msg->local_comm_id; 1890 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 1891 work->mad_recv_wc->recv_buf.grh, 1892 &cm_id_priv->av); 1893 if (ret) 1894 goto destroy; 1895 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1896 id.local_id); 1897 if (IS_ERR(cm_id_priv->timewait_info)) { 1898 ret = PTR_ERR(cm_id_priv->timewait_info); 1899 goto destroy; 1900 } 1901 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; 1902 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; 1903 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg); 1904 1905 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 1906 if (!listen_cm_id_priv) { 1907 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, 1908 be32_to_cpu(cm_id->local_id)); 1909 ret = -EINVAL; 1910 goto free_timeinfo; 1911 } 1912 1913 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1914 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1915 cm_id_priv->id.service_id = req_msg->service_id; 1916 cm_id_priv->id.service_mask = ~cpu_to_be64(0); 1917 1918 cm_process_routed_req(req_msg, work->mad_recv_wc->wc); 1919 1920 memset(&work->path[0], 0, sizeof(work->path[0])); 1921 if (cm_req_has_alt_path(req_msg)) 1922 memset(&work->path[1], 0, sizeof(work->path[1])); 1923 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); 1924 ret = ib_get_cached_gid(work->port->cm_dev->ib_device, 1925 work->port->port_num, 1926 grh->sgid_index, 1927 &gid, &gid_attr); 1928 if (ret) { 1929 ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0); 1930 goto rejected; 1931 } 1932 1933 if (gid_attr.ndev) { 1934 work->path[0].rec_type = 1935 sa_conv_gid_to_pathrec_type(gid_attr.gid_type); 1936 sa_path_set_ifindex(&work->path[0], 1937 gid_attr.ndev->ifindex); 1938 sa_path_set_ndev(&work->path[0], 1939 dev_net(gid_attr.ndev)); 1940 dev_put(gid_attr.ndev); 1941 } else { 1942 cm_path_set_rec_type(work->port->cm_dev->ib_device, 1943 work->port->port_num, 1944 &work->path[0], 1945 &req_msg->primary_local_gid); 1946 } 1947 if (cm_req_has_alt_path(req_msg)) 1948 work->path[1].rec_type = 
work->path[0].rec_type; 1949 cm_format_paths_from_req(req_msg, &work->path[0], 1950 &work->path[1]); 1951 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) 1952 sa_path_set_dmac(&work->path[0], 1953 cm_id_priv->av.ah_attr.roce.dmac); 1954 work->path[0].hop_limit = grh->hop_limit; 1955 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, 1956 cm_id_priv); 1957 if (ret) { 1958 int err; 1959 1960 err = ib_get_cached_gid(work->port->cm_dev->ib_device, 1961 work->port->port_num, 0, 1962 &work->path[0].sgid, 1963 NULL); 1964 if (err) 1965 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, 1966 NULL, 0, NULL, 0); 1967 else 1968 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, 1969 &work->path[0].sgid, 1970 sizeof(work->path[0].sgid), 1971 NULL, 0); 1972 goto rejected; 1973 } 1974 if (cm_req_has_alt_path(req_msg)) { 1975 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, 1976 cm_id_priv); 1977 if (ret) { 1978 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1979 &work->path[0].sgid, 1980 sizeof(work->path[0].sgid), NULL, 0); 1981 goto rejected; 1982 } 1983 } 1984 cm_id_priv->tid = req_msg->hdr.tid; 1985 cm_id_priv->timeout_ms = cm_convert_to_ms( 1986 cm_req_get_local_resp_timeout(req_msg)); 1987 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg); 1988 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg); 1989 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg); 1990 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg); 1991 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg); 1992 cm_id_priv->pkey = req_msg->pkey; 1993 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg); 1994 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg); 1995 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); 1996 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); 1997 1998 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 1999 cm_process_work(cm_id_priv, work); 2000 cm_deref_id(listen_cm_id_priv); 2001 return 0; 2002 2003 rejected: 2004 atomic_dec(&cm_id_priv->refcount); 2005 cm_deref_id(listen_cm_id_priv); 2006 free_timeinfo: 2007 kfree(cm_id_priv->timewait_info); 2008 destroy: 2009 ib_destroy_cm_id(cm_id); 2010 return ret; 2011 } 2012 2013 static void cm_format_rep(struct cm_rep_msg *rep_msg, 2014 struct cm_id_private *cm_id_priv, 2015 struct ib_cm_rep_param *param) 2016 { 2017 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid); 2018 rep_msg->local_comm_id = cm_id_priv->id.local_id; 2019 rep_msg->remote_comm_id = cm_id_priv->id.remote_id; 2020 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); 2021 rep_msg->resp_resources = param->responder_resources; 2022 cm_rep_set_target_ack_delay(rep_msg, 2023 cm_id_priv->av.port->cm_dev->ack_delay); 2024 cm_rep_set_failover(rep_msg, param->failover_accepted); 2025 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 2026 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid; 2027 2028 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { 2029 rep_msg->initiator_depth = param->initiator_depth; 2030 cm_rep_set_flow_ctrl(rep_msg, param->flow_control); 2031 cm_rep_set_srq(rep_msg, param->srq); 2032 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); 2033 } else { 2034 cm_rep_set_srq(rep_msg, 1); 2035 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num)); 2036 } 2037 2038 if (param->private_data && param->private_data_len) 2039 memcpy(rep_msg->private_data, param->private_data, 2040 param->private_data_len); 2041 } 2042 2043 int ib_send_cm_rep(struct 
ib_cm_id *cm_id, 2044 struct ib_cm_rep_param *param) 2045 { 2046 struct cm_id_private *cm_id_priv; 2047 struct ib_mad_send_buf *msg; 2048 struct cm_rep_msg *rep_msg; 2049 unsigned long flags; 2050 int ret; 2051 2052 if (param->private_data && 2053 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) 2054 return -EINVAL; 2055 2056 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2057 spin_lock_irqsave(&cm_id_priv->lock, flags); 2058 if (cm_id->state != IB_CM_REQ_RCVD && 2059 cm_id->state != IB_CM_MRA_REQ_SENT) { 2060 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__, 2061 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state); 2062 ret = -EINVAL; 2063 goto out; 2064 } 2065 2066 ret = cm_alloc_msg(cm_id_priv, &msg); 2067 if (ret) 2068 goto out; 2069 2070 rep_msg = (struct cm_rep_msg *) msg->mad; 2071 cm_format_rep(rep_msg, cm_id_priv, param); 2072 msg->timeout_ms = cm_id_priv->timeout_ms; 2073 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; 2074 2075 ret = ib_post_send_mad(msg, NULL); 2076 if (ret) { 2077 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2078 cm_free_msg(msg); 2079 return ret; 2080 } 2081 2082 cm_id->state = IB_CM_REP_SENT; 2083 cm_id_priv->msg = msg; 2084 cm_id_priv->initiator_depth = param->initiator_depth; 2085 cm_id_priv->responder_resources = param->responder_resources; 2086 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); 2087 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); 2088 2089 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2090 return ret; 2091 } 2092 EXPORT_SYMBOL(ib_send_cm_rep); 2093 2094 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 2095 struct cm_id_private *cm_id_priv, 2096 const void *private_data, 2097 u8 private_data_len) 2098 { 2099 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 2100 rtu_msg->local_comm_id = cm_id_priv->id.local_id; 2101 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; 2102 2103 if (private_data && private_data_len) 2104 memcpy(rtu_msg->private_data, private_data, private_data_len); 2105 } 2106 2107 int ib_send_cm_rtu(struct ib_cm_id *cm_id, 2108 const void *private_data, 2109 u8 private_data_len) 2110 { 2111 struct cm_id_private *cm_id_priv; 2112 struct ib_mad_send_buf *msg; 2113 unsigned long flags; 2114 void *data; 2115 int ret; 2116 2117 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 2118 return -EINVAL; 2119 2120 data = cm_copy_private_data(private_data, private_data_len); 2121 if (IS_ERR(data)) 2122 return PTR_ERR(data); 2123 2124 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2125 spin_lock_irqsave(&cm_id_priv->lock, flags); 2126 if (cm_id->state != IB_CM_REP_RCVD && 2127 cm_id->state != IB_CM_MRA_REP_SENT) { 2128 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__, 2129 be32_to_cpu(cm_id->local_id), cm_id->state); 2130 ret = -EINVAL; 2131 goto error; 2132 } 2133 2134 ret = cm_alloc_msg(cm_id_priv, &msg); 2135 if (ret) 2136 goto error; 2137 2138 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 2139 private_data, private_data_len); 2140 2141 ret = ib_post_send_mad(msg, NULL); 2142 if (ret) { 2143 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2144 cm_free_msg(msg); 2145 kfree(data); 2146 return ret; 2147 } 2148 2149 cm_id->state = IB_CM_ESTABLISHED; 2150 cm_set_private_data(cm_id_priv, data, private_data_len); 2151 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2152 return 0; 2153 2154 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2155 kfree(data); 2156 return ret; 2157 } 
2158 EXPORT_SYMBOL(ib_send_cm_rtu); 2159 2160 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) 2161 { 2162 struct cm_rep_msg *rep_msg; 2163 struct ib_cm_rep_event_param *param; 2164 2165 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 2166 param = &work->cm_event.param.rep_rcvd; 2167 param->remote_ca_guid = rep_msg->local_ca_guid; 2168 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); 2169 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); 2170 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); 2171 param->responder_resources = rep_msg->initiator_depth; 2172 param->initiator_depth = rep_msg->resp_resources; 2173 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 2174 param->failover_accepted = cm_rep_get_failover(rep_msg); 2175 param->flow_control = cm_rep_get_flow_ctrl(rep_msg); 2176 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 2177 param->srq = cm_rep_get_srq(rep_msg); 2178 work->cm_event.private_data = &rep_msg->private_data; 2179 } 2180 2181 static void cm_dup_rep_handler(struct cm_work *work) 2182 { 2183 struct cm_id_private *cm_id_priv; 2184 struct cm_rep_msg *rep_msg; 2185 struct ib_mad_send_buf *msg = NULL; 2186 int ret; 2187 2188 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 2189 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 2190 rep_msg->local_comm_id); 2191 if (!cm_id_priv) 2192 return; 2193 2194 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2195 counter[CM_REP_COUNTER]); 2196 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 2197 if (ret) 2198 goto deref; 2199 2200 spin_lock_irq(&cm_id_priv->lock); 2201 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 2202 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 2203 cm_id_priv->private_data, 2204 cm_id_priv->private_data_len); 2205 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 2206 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2207 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, 2208 cm_id_priv->private_data, 2209 cm_id_priv->private_data_len); 2210 else 2211 goto unlock; 2212 spin_unlock_irq(&cm_id_priv->lock); 2213 2214 ret = ib_post_send_mad(msg, NULL); 2215 if (ret) 2216 goto free; 2217 goto deref; 2218 2219 unlock: spin_unlock_irq(&cm_id_priv->lock); 2220 free: cm_free_msg(msg); 2221 deref: cm_deref_id(cm_id_priv); 2222 } 2223 2224 static int cm_rep_handler(struct cm_work *work) 2225 { 2226 struct cm_id_private *cm_id_priv; 2227 struct cm_rep_msg *rep_msg; 2228 int ret; 2229 struct cm_id_private *cur_cm_id_priv; 2230 struct ib_cm_id *cm_id; 2231 struct cm_timewait_info *timewait_info; 2232 2233 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 2234 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); 2235 if (!cm_id_priv) { 2236 cm_dup_rep_handler(work); 2237 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__, 2238 be32_to_cpu(rep_msg->remote_comm_id)); 2239 return -EINVAL; 2240 } 2241 2242 cm_format_rep_event(work, cm_id_priv->qp_type); 2243 2244 spin_lock_irq(&cm_id_priv->lock); 2245 switch (cm_id_priv->id.state) { 2246 case IB_CM_REQ_SENT: 2247 case IB_CM_MRA_REQ_RCVD: 2248 break; 2249 default: 2250 spin_unlock_irq(&cm_id_priv->lock); 2251 ret = -EINVAL; 2252 pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n", 2253 __func__, cm_id_priv->id.state, 2254 be32_to_cpu(rep_msg->local_comm_id), 2255 be32_to_cpu(rep_msg->remote_comm_id)); 2256 goto error; 2257 } 2258 2259 
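	/* Record the remote comm ID, CA GUID and QPN in the timewait info so
	 * that duplicate REPs and stale connections can be detected below
	 * while holding cm.lock.
	 */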
cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; 2260 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; 2261 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2262 2263 spin_lock(&cm.lock); 2264 /* Check for duplicate REP. */ 2265 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 2266 spin_unlock(&cm.lock); 2267 spin_unlock_irq(&cm_id_priv->lock); 2268 ret = -EINVAL; 2269 pr_debug("%s: Failed to insert remote id %d\n", __func__, 2270 be32_to_cpu(rep_msg->remote_comm_id)); 2271 goto error; 2272 } 2273 /* Check for a stale connection. */ 2274 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 2275 if (timewait_info) { 2276 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 2277 &cm.remote_id_table); 2278 cm_id_priv->timewait_info->inserted_remote_id = 0; 2279 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 2280 timewait_info->work.remote_id); 2281 2282 spin_unlock(&cm.lock); 2283 spin_unlock_irq(&cm_id_priv->lock); 2284 cm_issue_rej(work->port, work->mad_recv_wc, 2285 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 2286 NULL, 0); 2287 ret = -EINVAL; 2288 pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n", 2289 __func__, be32_to_cpu(rep_msg->local_comm_id), 2290 be32_to_cpu(rep_msg->remote_comm_id)); 2291 2292 if (cur_cm_id_priv) { 2293 cm_id = &cur_cm_id_priv->id; 2294 ib_send_cm_dreq(cm_id, NULL, 0); 2295 cm_deref_id(cur_cm_id_priv); 2296 } 2297 2298 goto error; 2299 } 2300 spin_unlock(&cm.lock); 2301 2302 cm_id_priv->id.state = IB_CM_REP_RCVD; 2303 cm_id_priv->id.remote_id = rep_msg->local_comm_id; 2304 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2305 cm_id_priv->initiator_depth = rep_msg->resp_resources; 2306 cm_id_priv->responder_resources = rep_msg->initiator_depth; 2307 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); 2308 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 2309 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 2310 cm_id_priv->av.timeout = 2311 cm_ack_timeout(cm_id_priv->target_ack_delay, 2312 cm_id_priv->av.timeout - 1); 2313 cm_id_priv->alt_av.timeout = 2314 cm_ack_timeout(cm_id_priv->target_ack_delay, 2315 cm_id_priv->alt_av.timeout - 1); 2316 2317 /* todo: handle peer_to_peer */ 2318 2319 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2320 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2321 if (!ret) 2322 list_add_tail(&work->list, &cm_id_priv->work_list); 2323 spin_unlock_irq(&cm_id_priv->lock); 2324 2325 if (ret) 2326 cm_process_work(cm_id_priv, work); 2327 else 2328 cm_deref_id(cm_id_priv); 2329 return 0; 2330 2331 error: 2332 cm_deref_id(cm_id_priv); 2333 return ret; 2334 } 2335 2336 static int cm_establish_handler(struct cm_work *work) 2337 { 2338 struct cm_id_private *cm_id_priv; 2339 int ret; 2340 2341 /* See comment in cm_establish about lookup. 
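The cm_id is looked up again here from the IDs saved in the work item rather than holding a reference across the workqueue.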
*/ 2342 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 2343 if (!cm_id_priv) 2344 return -EINVAL; 2345 2346 spin_lock_irq(&cm_id_priv->lock); 2347 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 2348 spin_unlock_irq(&cm_id_priv->lock); 2349 goto out; 2350 } 2351 2352 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2353 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2354 if (!ret) 2355 list_add_tail(&work->list, &cm_id_priv->work_list); 2356 spin_unlock_irq(&cm_id_priv->lock); 2357 2358 if (ret) 2359 cm_process_work(cm_id_priv, work); 2360 else 2361 cm_deref_id(cm_id_priv); 2362 return 0; 2363 out: 2364 cm_deref_id(cm_id_priv); 2365 return -EINVAL; 2366 } 2367 2368 static int cm_rtu_handler(struct cm_work *work) 2369 { 2370 struct cm_id_private *cm_id_priv; 2371 struct cm_rtu_msg *rtu_msg; 2372 int ret; 2373 2374 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 2375 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, 2376 rtu_msg->local_comm_id); 2377 if (!cm_id_priv) 2378 return -EINVAL; 2379 2380 work->cm_event.private_data = &rtu_msg->private_data; 2381 2382 spin_lock_irq(&cm_id_priv->lock); 2383 if (cm_id_priv->id.state != IB_CM_REP_SENT && 2384 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 2385 spin_unlock_irq(&cm_id_priv->lock); 2386 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2387 counter[CM_RTU_COUNTER]); 2388 goto out; 2389 } 2390 cm_id_priv->id.state = IB_CM_ESTABLISHED; 2391 2392 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2393 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2394 if (!ret) 2395 list_add_tail(&work->list, &cm_id_priv->work_list); 2396 spin_unlock_irq(&cm_id_priv->lock); 2397 2398 if (ret) 2399 cm_process_work(cm_id_priv, work); 2400 else 2401 cm_deref_id(cm_id_priv); 2402 return 0; 2403 out: 2404 cm_deref_id(cm_id_priv); 2405 return -EINVAL; 2406 } 2407 2408 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 2409 struct cm_id_private *cm_id_priv, 2410 const void *private_data, 2411 u8 private_data_len) 2412 { 2413 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 2414 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); 2415 dreq_msg->local_comm_id = cm_id_priv->id.local_id; 2416 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; 2417 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); 2418 2419 if (private_data && private_data_len) 2420 memcpy(dreq_msg->private_data, private_data, private_data_len); 2421 } 2422 2423 int ib_send_cm_dreq(struct ib_cm_id *cm_id, 2424 const void *private_data, 2425 u8 private_data_len) 2426 { 2427 struct cm_id_private *cm_id_priv; 2428 struct ib_mad_send_buf *msg; 2429 unsigned long flags; 2430 int ret; 2431 2432 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 2433 return -EINVAL; 2434 2435 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2436 spin_lock_irqsave(&cm_id_priv->lock, flags); 2437 if (cm_id->state != IB_CM_ESTABLISHED) { 2438 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, 2439 be32_to_cpu(cm_id->local_id), cm_id->state); 2440 ret = -EINVAL; 2441 goto out; 2442 } 2443 2444 if (cm_id->lap_state == IB_CM_LAP_SENT || 2445 cm_id->lap_state == IB_CM_MRA_LAP_RCVD) 2446 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2447 2448 ret = cm_alloc_msg(cm_id_priv, &msg); 2449 if (ret) { 2450 cm_enter_timewait(cm_id_priv); 2451 goto out; 2452 } 2453 2454 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 2455 private_data, private_data_len); 2456 msg->timeout_ms = 
cm_id_priv->timeout_ms; 2457 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 2458 2459 ret = ib_post_send_mad(msg, NULL); 2460 if (ret) { 2461 cm_enter_timewait(cm_id_priv); 2462 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2463 cm_free_msg(msg); 2464 return ret; 2465 } 2466 2467 cm_id->state = IB_CM_DREQ_SENT; 2468 cm_id_priv->msg = msg; 2469 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2470 return ret; 2471 } 2472 EXPORT_SYMBOL(ib_send_cm_dreq); 2473 2474 static void cm_format_drep(struct cm_drep_msg *drep_msg, 2475 struct cm_id_private *cm_id_priv, 2476 const void *private_data, 2477 u8 private_data_len) 2478 { 2479 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 2480 drep_msg->local_comm_id = cm_id_priv->id.local_id; 2481 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 2482 2483 if (private_data && private_data_len) 2484 memcpy(drep_msg->private_data, private_data, private_data_len); 2485 } 2486 2487 int ib_send_cm_drep(struct ib_cm_id *cm_id, 2488 const void *private_data, 2489 u8 private_data_len) 2490 { 2491 struct cm_id_private *cm_id_priv; 2492 struct ib_mad_send_buf *msg; 2493 unsigned long flags; 2494 void *data; 2495 int ret; 2496 2497 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 2498 return -EINVAL; 2499 2500 data = cm_copy_private_data(private_data, private_data_len); 2501 if (IS_ERR(data)) 2502 return PTR_ERR(data); 2503 2504 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2505 spin_lock_irqsave(&cm_id_priv->lock, flags); 2506 if (cm_id->state != IB_CM_DREQ_RCVD) { 2507 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2508 kfree(data); 2509 pr_debug("%s: local_id %d, cm_id->state(%d) != IB_CM_DREQ_RCVD\n", 2510 __func__, be32_to_cpu(cm_id->local_id), cm_id->state); 2511 return -EINVAL; 2512 } 2513 2514 cm_set_private_data(cm_id_priv, data, private_data_len); 2515 cm_enter_timewait(cm_id_priv); 2516 2517 ret = cm_alloc_msg(cm_id_priv, &msg); 2518 if (ret) 2519 goto out; 2520 2521 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2522 private_data, private_data_len); 2523 2524 ret = ib_post_send_mad(msg, NULL); 2525 if (ret) { 2526 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2527 cm_free_msg(msg); 2528 return ret; 2529 } 2530 2531 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2532 return ret; 2533 } 2534 EXPORT_SYMBOL(ib_send_cm_drep); 2535 2536 static int cm_issue_drep(struct cm_port *port, 2537 struct ib_mad_recv_wc *mad_recv_wc) 2538 { 2539 struct ib_mad_send_buf *msg = NULL; 2540 struct cm_dreq_msg *dreq_msg; 2541 struct cm_drep_msg *drep_msg; 2542 int ret; 2543 2544 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 2545 if (ret) 2546 return ret; 2547 2548 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; 2549 drep_msg = (struct cm_drep_msg *) msg->mad; 2550 2551 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); 2552 drep_msg->remote_comm_id = dreq_msg->local_comm_id; 2553 drep_msg->local_comm_id = dreq_msg->remote_comm_id; 2554 2555 ret = ib_post_send_mad(msg, NULL); 2556 if (ret) 2557 cm_free_msg(msg); 2558 2559 return ret; 2560 } 2561 2562 static int cm_dreq_handler(struct cm_work *work) 2563 { 2564 struct cm_id_private *cm_id_priv; 2565 struct cm_dreq_msg *dreq_msg; 2566 struct ib_mad_send_buf *msg = NULL; 2567 int ret; 2568 2569 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 2570 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 2571 dreq_msg->local_comm_id); 2572 if (!cm_id_priv) { 2573
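		/* No connection matches this DREQ: count it as a duplicate and
		 * issue a DREP anyway so the sender stops retransmitting.
		 */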
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2574 counter[CM_DREQ_COUNTER]); 2575 cm_issue_drep(work->port, work->mad_recv_wc); 2576 pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n", 2577 __func__, be32_to_cpu(dreq_msg->local_comm_id), 2578 be32_to_cpu(dreq_msg->remote_comm_id)); 2579 return -EINVAL; 2580 } 2581 2582 work->cm_event.private_data = &dreq_msg->private_data; 2583 2584 spin_lock_irq(&cm_id_priv->lock); 2585 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 2586 goto unlock; 2587 2588 switch (cm_id_priv->id.state) { 2589 case IB_CM_REP_SENT: 2590 case IB_CM_DREQ_SENT: 2591 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2592 break; 2593 case IB_CM_ESTABLISHED: 2594 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || 2595 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2596 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2597 break; 2598 case IB_CM_MRA_REP_RCVD: 2599 break; 2600 case IB_CM_TIMEWAIT: 2601 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2602 counter[CM_DREQ_COUNTER]); 2603 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); 2604 if (IS_ERR(msg)) 2605 goto unlock; 2606 2607 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2608 cm_id_priv->private_data, 2609 cm_id_priv->private_data_len); 2610 spin_unlock_irq(&cm_id_priv->lock); 2611 2612 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || 2613 ib_post_send_mad(msg, NULL)) 2614 cm_free_msg(msg); 2615 goto deref; 2616 case IB_CM_DREQ_RCVD: 2617 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2618 counter[CM_DREQ_COUNTER]); 2619 goto unlock; 2620 default: 2621 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 2622 __func__, be32_to_cpu(cm_id_priv->id.local_id), 2623 cm_id_priv->id.state); 2624 goto unlock; 2625 } 2626 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 2627 cm_id_priv->tid = dreq_msg->hdr.tid; 2628 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2629 if (!ret) 2630 list_add_tail(&work->list, &cm_id_priv->work_list); 2631 spin_unlock_irq(&cm_id_priv->lock); 2632 2633 if (ret) 2634 cm_process_work(cm_id_priv, work); 2635 else 2636 cm_deref_id(cm_id_priv); 2637 return 0; 2638 2639 unlock: spin_unlock_irq(&cm_id_priv->lock); 2640 deref: cm_deref_id(cm_id_priv); 2641 return -EINVAL; 2642 } 2643 2644 static int cm_drep_handler(struct cm_work *work) 2645 { 2646 struct cm_id_private *cm_id_priv; 2647 struct cm_drep_msg *drep_msg; 2648 int ret; 2649 2650 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 2651 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 2652 drep_msg->local_comm_id); 2653 if (!cm_id_priv) 2654 return -EINVAL; 2655 2656 work->cm_event.private_data = &drep_msg->private_data; 2657 2658 spin_lock_irq(&cm_id_priv->lock); 2659 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 2660 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2661 spin_unlock_irq(&cm_id_priv->lock); 2662 goto out; 2663 } 2664 cm_enter_timewait(cm_id_priv); 2665 2666 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2667 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2668 if (!ret) 2669 list_add_tail(&work->list, &cm_id_priv->work_list); 2670 spin_unlock_irq(&cm_id_priv->lock); 2671 2672 if (ret) 2673 cm_process_work(cm_id_priv, work); 2674 else 2675 cm_deref_id(cm_id_priv); 2676 return 0; 2677 out: 2678 cm_deref_id(cm_id_priv); 2679 return -EINVAL; 2680 } 2681 2682 int ib_send_cm_rej(struct ib_cm_id *cm_id, 2683 enum ib_cm_rej_reason reason, 2684 
void *ari, 2685 u8 ari_length, 2686 const void *private_data, 2687 u8 private_data_len) 2688 { 2689 struct cm_id_private *cm_id_priv; 2690 struct ib_mad_send_buf *msg; 2691 unsigned long flags; 2692 int ret; 2693 2694 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2695 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2696 return -EINVAL; 2697 2698 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2699 2700 spin_lock_irqsave(&cm_id_priv->lock, flags); 2701 switch (cm_id->state) { 2702 case IB_CM_REQ_SENT: 2703 case IB_CM_MRA_REQ_RCVD: 2704 case IB_CM_REQ_RCVD: 2705 case IB_CM_MRA_REQ_SENT: 2706 case IB_CM_REP_RCVD: 2707 case IB_CM_MRA_REP_SENT: 2708 ret = cm_alloc_msg(cm_id_priv, &msg); 2709 if (!ret) 2710 cm_format_rej((struct cm_rej_msg *) msg->mad, 2711 cm_id_priv, reason, ari, ari_length, 2712 private_data, private_data_len); 2713 2714 cm_reset_to_idle(cm_id_priv); 2715 break; 2716 case IB_CM_REP_SENT: 2717 case IB_CM_MRA_REP_RCVD: 2718 ret = cm_alloc_msg(cm_id_priv, &msg); 2719 if (!ret) 2720 cm_format_rej((struct cm_rej_msg *) msg->mad, 2721 cm_id_priv, reason, ari, ari_length, 2722 private_data, private_data_len); 2723 2724 cm_enter_timewait(cm_id_priv); 2725 break; 2726 default: 2727 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, 2728 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state); 2729 ret = -EINVAL; 2730 goto out; 2731 } 2732 2733 if (ret) 2734 goto out; 2735 2736 ret = ib_post_send_mad(msg, NULL); 2737 if (ret) 2738 cm_free_msg(msg); 2739 2740 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2741 return ret; 2742 } 2743 EXPORT_SYMBOL(ib_send_cm_rej); 2744 2745 static void cm_format_rej_event(struct cm_work *work) 2746 { 2747 struct cm_rej_msg *rej_msg; 2748 struct ib_cm_rej_event_param *param; 2749 2750 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2751 param = &work->cm_event.param.rej_rcvd; 2752 param->ari = rej_msg->ari; 2753 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2754 param->reason = __be16_to_cpu(rej_msg->reason); 2755 work->cm_event.private_data = &rej_msg->private_data; 2756 } 2757 2758 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) 2759 { 2760 struct cm_timewait_info *timewait_info; 2761 struct cm_id_private *cm_id_priv; 2762 __be32 remote_id; 2763 2764 remote_id = rej_msg->local_comm_id; 2765 2766 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2767 spin_lock_irq(&cm.lock); 2768 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2769 remote_id); 2770 if (!timewait_info) { 2771 spin_unlock_irq(&cm.lock); 2772 return NULL; 2773 } 2774 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2775 (timewait_info->work.local_id ^ 2776 cm.random_id_operand)); 2777 if (cm_id_priv) { 2778 if (cm_id_priv->id.remote_id == remote_id) 2779 atomic_inc(&cm_id_priv->refcount); 2780 else 2781 cm_id_priv = NULL; 2782 } 2783 spin_unlock_irq(&cm.lock); 2784 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2785 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2786 else 2787 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2788 2789 return cm_id_priv; 2790 } 2791 2792 static int cm_rej_handler(struct cm_work *work) 2793 { 2794 struct cm_id_private *cm_id_priv; 2795 struct cm_rej_msg *rej_msg; 2796 int ret; 2797 2798 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2799 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2800 if (!cm_id_priv) 2801 return -EINVAL; 2802 2803 cm_format_rej_event(work); 
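	/* How a received REJ is handled depends on the local state: a rejected
	 * REQ is normally reset to idle (timewait if the peer reports a stale
	 * connection), while a rejected REP or an in-progress disconnect
	 * enters timewait.
	 */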
2804 2805 spin_lock_irq(&cm_id_priv->lock); 2806 switch (cm_id_priv->id.state) { 2807 case IB_CM_REQ_SENT: 2808 case IB_CM_MRA_REQ_RCVD: 2809 case IB_CM_REP_SENT: 2810 case IB_CM_MRA_REP_RCVD: 2811 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2812 /* fall through */ 2813 case IB_CM_REQ_RCVD: 2814 case IB_CM_MRA_REQ_SENT: 2815 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2816 cm_enter_timewait(cm_id_priv); 2817 else 2818 cm_reset_to_idle(cm_id_priv); 2819 break; 2820 case IB_CM_DREQ_SENT: 2821 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2822 /* fall through */ 2823 case IB_CM_REP_RCVD: 2824 case IB_CM_MRA_REP_SENT: 2825 cm_enter_timewait(cm_id_priv); 2826 break; 2827 case IB_CM_ESTABLISHED: 2828 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || 2829 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { 2830 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) 2831 ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2832 cm_id_priv->msg); 2833 cm_enter_timewait(cm_id_priv); 2834 break; 2835 } 2836 /* fall through */ 2837 default: 2838 spin_unlock_irq(&cm_id_priv->lock); 2839 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 2840 __func__, be32_to_cpu(cm_id_priv->id.local_id), 2841 cm_id_priv->id.state); 2842 ret = -EINVAL; 2843 goto out; 2844 } 2845 2846 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2847 if (!ret) 2848 list_add_tail(&work->list, &cm_id_priv->work_list); 2849 spin_unlock_irq(&cm_id_priv->lock); 2850 2851 if (ret) 2852 cm_process_work(cm_id_priv, work); 2853 else 2854 cm_deref_id(cm_id_priv); 2855 return 0; 2856 out: 2857 cm_deref_id(cm_id_priv); 2858 return -EINVAL; 2859 } 2860 2861 int ib_send_cm_mra(struct ib_cm_id *cm_id, 2862 u8 service_timeout, 2863 const void *private_data, 2864 u8 private_data_len) 2865 { 2866 struct cm_id_private *cm_id_priv; 2867 struct ib_mad_send_buf *msg; 2868 enum ib_cm_state cm_state; 2869 enum ib_cm_lap_state lap_state; 2870 enum cm_msg_response msg_response; 2871 void *data; 2872 unsigned long flags; 2873 int ret; 2874 2875 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2876 return -EINVAL; 2877 2878 data = cm_copy_private_data(private_data, private_data_len); 2879 if (IS_ERR(data)) 2880 return PTR_ERR(data); 2881 2882 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2883 2884 spin_lock_irqsave(&cm_id_priv->lock, flags); 2885 switch(cm_id_priv->id.state) { 2886 case IB_CM_REQ_RCVD: 2887 cm_state = IB_CM_MRA_REQ_SENT; 2888 lap_state = cm_id->lap_state; 2889 msg_response = CM_MSG_RESPONSE_REQ; 2890 break; 2891 case IB_CM_REP_RCVD: 2892 cm_state = IB_CM_MRA_REP_SENT; 2893 lap_state = cm_id->lap_state; 2894 msg_response = CM_MSG_RESPONSE_REP; 2895 break; 2896 case IB_CM_ESTABLISHED: 2897 if (cm_id->lap_state == IB_CM_LAP_RCVD) { 2898 cm_state = cm_id->state; 2899 lap_state = IB_CM_MRA_LAP_SENT; 2900 msg_response = CM_MSG_RESPONSE_OTHER; 2901 break; 2902 } 2903 /* fall through */ 2904 default: 2905 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 2906 __func__, be32_to_cpu(cm_id_priv->id.local_id), 2907 cm_id_priv->id.state); 2908 ret = -EINVAL; 2909 goto error1; 2910 } 2911 2912 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { 2913 ret = cm_alloc_msg(cm_id_priv, &msg); 2914 if (ret) 2915 goto error1; 2916 2917 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2918 msg_response, service_timeout, 2919 private_data, private_data_len); 2920 ret = ib_post_send_mad(msg, NULL); 2921 if (ret) 2922 goto error2; 2923 } 2924 2925 cm_id->state = cm_state; 2926 
cm_id->lap_state = lap_state; 2927 cm_id_priv->service_timeout = service_timeout; 2928 cm_set_private_data(cm_id_priv, data, private_data_len); 2929 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2930 return 0; 2931 2932 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2933 kfree(data); 2934 return ret; 2935 2936 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2937 kfree(data); 2938 cm_free_msg(msg); 2939 return ret; 2940 } 2941 EXPORT_SYMBOL(ib_send_cm_mra); 2942 2943 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2944 { 2945 switch (cm_mra_get_msg_mraed(mra_msg)) { 2946 case CM_MSG_RESPONSE_REQ: 2947 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2948 case CM_MSG_RESPONSE_REP: 2949 case CM_MSG_RESPONSE_OTHER: 2950 return cm_acquire_id(mra_msg->remote_comm_id, 2951 mra_msg->local_comm_id); 2952 default: 2953 return NULL; 2954 } 2955 } 2956 2957 static int cm_mra_handler(struct cm_work *work) 2958 { 2959 struct cm_id_private *cm_id_priv; 2960 struct cm_mra_msg *mra_msg; 2961 int timeout, ret; 2962 2963 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2964 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2965 if (!cm_id_priv) 2966 return -EINVAL; 2967 2968 work->cm_event.private_data = &mra_msg->private_data; 2969 work->cm_event.param.mra_rcvd.service_timeout = 2970 cm_mra_get_service_timeout(mra_msg); 2971 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2972 cm_convert_to_ms(cm_id_priv->av.timeout); 2973 2974 spin_lock_irq(&cm_id_priv->lock); 2975 switch (cm_id_priv->id.state) { 2976 case IB_CM_REQ_SENT: 2977 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2978 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2979 cm_id_priv->msg, timeout)) 2980 goto out; 2981 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2982 break; 2983 case IB_CM_REP_SENT: 2984 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2985 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2986 cm_id_priv->msg, timeout)) 2987 goto out; 2988 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2989 break; 2990 case IB_CM_ESTABLISHED: 2991 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2992 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2993 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2994 cm_id_priv->msg, timeout)) { 2995 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2996 atomic_long_inc(&work->port-> 2997 counter_group[CM_RECV_DUPLICATES]. 2998 counter[CM_MRA_COUNTER]); 2999 goto out; 3000 } 3001 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 3002 break; 3003 case IB_CM_MRA_REQ_RCVD: 3004 case IB_CM_MRA_REP_RCVD: 3005 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
3006 counter[CM_MRA_COUNTER]); 3007 /* fall through */ 3008 default: 3009 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n", 3010 __func__, be32_to_cpu(cm_id_priv->id.local_id), 3011 cm_id_priv->id.state); 3012 goto out; 3013 } 3014 3015 cm_id_priv->msg->context[1] = (void *) (unsigned long) 3016 cm_id_priv->id.state; 3017 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3018 if (!ret) 3019 list_add_tail(&work->list, &cm_id_priv->work_list); 3020 spin_unlock_irq(&cm_id_priv->lock); 3021 3022 if (ret) 3023 cm_process_work(cm_id_priv, work); 3024 else 3025 cm_deref_id(cm_id_priv); 3026 return 0; 3027 out: 3028 spin_unlock_irq(&cm_id_priv->lock); 3029 cm_deref_id(cm_id_priv); 3030 return -EINVAL; 3031 } 3032 3033 static void cm_format_lap(struct cm_lap_msg *lap_msg, 3034 struct cm_id_private *cm_id_priv, 3035 struct sa_path_rec *alternate_path, 3036 const void *private_data, 3037 u8 private_data_len) 3038 { 3039 bool alt_ext = false; 3040 3041 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA) 3042 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid, 3043 alternate_path->opa.slid); 3044 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 3045 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 3046 lap_msg->local_comm_id = cm_id_priv->id.local_id; 3047 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 3048 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 3049 /* todo: need remote CM response timeout */ 3050 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 3051 lap_msg->alt_local_lid = 3052 htons(ntohl(sa_path_get_slid(alternate_path))); 3053 lap_msg->alt_remote_lid = 3054 htons(ntohl(sa_path_get_dlid(alternate_path))); 3055 lap_msg->alt_local_gid = alternate_path->sgid; 3056 lap_msg->alt_remote_gid = alternate_path->dgid; 3057 if (alt_ext) { 3058 lap_msg->alt_local_gid.global.interface_id 3059 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid)); 3060 lap_msg->alt_remote_gid.global.interface_id 3061 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid)); 3062 } 3063 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 3064 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 3065 lap_msg->alt_hop_limit = alternate_path->hop_limit; 3066 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 3067 cm_lap_set_sl(lap_msg, alternate_path->sl); 3068 cm_lap_set_subnet_local(lap_msg, 1); /* local only... 
*/ 3069 cm_lap_set_local_ack_timeout(lap_msg, 3070 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 3071 alternate_path->packet_life_time)); 3072 3073 if (private_data && private_data_len) 3074 memcpy(lap_msg->private_data, private_data, private_data_len); 3075 } 3076 3077 int ib_send_cm_lap(struct ib_cm_id *cm_id, 3078 struct sa_path_rec *alternate_path, 3079 const void *private_data, 3080 u8 private_data_len) 3081 { 3082 struct cm_id_private *cm_id_priv; 3083 struct ib_mad_send_buf *msg; 3084 unsigned long flags; 3085 int ret; 3086 3087 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 3088 return -EINVAL; 3089 3090 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3091 spin_lock_irqsave(&cm_id_priv->lock, flags); 3092 if (cm_id->state != IB_CM_ESTABLISHED || 3093 (cm_id->lap_state != IB_CM_LAP_UNINIT && 3094 cm_id->lap_state != IB_CM_LAP_IDLE)) { 3095 ret = -EINVAL; 3096 goto out; 3097 } 3098 3099 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, 3100 cm_id_priv); 3101 if (ret) 3102 goto out; 3103 cm_id_priv->alt_av.timeout = 3104 cm_ack_timeout(cm_id_priv->target_ack_delay, 3105 cm_id_priv->alt_av.timeout - 1); 3106 3107 ret = cm_alloc_msg(cm_id_priv, &msg); 3108 if (ret) 3109 goto out; 3110 3111 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 3112 alternate_path, private_data, private_data_len); 3113 msg->timeout_ms = cm_id_priv->timeout_ms; 3114 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 3115 3116 ret = ib_post_send_mad(msg, NULL); 3117 if (ret) { 3118 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3119 cm_free_msg(msg); 3120 return ret; 3121 } 3122 3123 cm_id->lap_state = IB_CM_LAP_SENT; 3124 cm_id_priv->msg = msg; 3125 3126 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3127 return ret; 3128 } 3129 EXPORT_SYMBOL(ib_send_cm_lap); 3130 3131 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg, 3132 struct sa_path_rec *path) 3133 { 3134 u32 lid; 3135 3136 if (path->rec_type != SA_PATH_REC_TYPE_OPA) { 3137 sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid)); 3138 sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid)); 3139 } else { 3140 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid); 3141 sa_path_set_dlid(path, lid); 3142 3143 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid); 3144 sa_path_set_slid(path, lid); 3145 } 3146 } 3147 3148 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, 3149 struct sa_path_rec *path, 3150 struct cm_lap_msg *lap_msg) 3151 { 3152 path->dgid = lap_msg->alt_local_gid; 3153 path->sgid = lap_msg->alt_remote_gid; 3154 path->flow_label = cm_lap_get_flow_label(lap_msg); 3155 path->hop_limit = lap_msg->alt_hop_limit; 3156 path->traffic_class = cm_lap_get_traffic_class(lap_msg); 3157 path->reversible = 1; 3158 path->pkey = cm_id_priv->pkey; 3159 path->sl = cm_lap_get_sl(lap_msg); 3160 path->mtu_selector = IB_SA_EQ; 3161 path->mtu = cm_id_priv->path_mtu; 3162 path->rate_selector = IB_SA_EQ; 3163 path->rate = cm_lap_get_packet_rate(lap_msg); 3164 path->packet_life_time_selector = IB_SA_EQ; 3165 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 3166 path->packet_life_time -= (path->packet_life_time > 0); 3167 cm_format_path_lid_from_lap(lap_msg, path); 3168 } 3169 3170 static int cm_lap_handler(struct cm_work *work) 3171 { 3172 struct cm_id_private *cm_id_priv; 3173 struct cm_lap_msg *lap_msg; 3174 struct ib_cm_lap_event_param *param; 3175 struct ib_mad_send_buf *msg = NULL; 3176 int ret; 3177 3178 /* Currently Alternate path 
messages are not supported for 3179 * RoCE link layer. 3180 */ 3181 if (rdma_protocol_roce(work->port->cm_dev->ib_device, 3182 work->port->port_num)) 3183 return -EINVAL; 3184 3185 /* todo: verify LAP request and send reject APR if invalid. */ 3186 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 3187 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 3188 lap_msg->local_comm_id); 3189 if (!cm_id_priv) 3190 return -EINVAL; 3191 3192 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 3193 work->mad_recv_wc->recv_buf.grh, 3194 &cm_id_priv->av); 3195 if (ret) 3196 goto deref; 3197 3198 param = &work->cm_event.param.lap_rcvd; 3199 memset(&work->path[0], 0, sizeof(work->path[1])); 3200 cm_path_set_rec_type(work->port->cm_dev->ib_device, 3201 work->port->port_num, 3202 &work->path[0], 3203 &lap_msg->alt_local_gid); 3204 param->alternate_path = &work->path[0]; 3205 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); 3206 work->cm_event.private_data = &lap_msg->private_data; 3207 3208 spin_lock_irq(&cm_id_priv->lock); 3209 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 3210 goto unlock; 3211 3212 switch (cm_id_priv->id.lap_state) { 3213 case IB_CM_LAP_UNINIT: 3214 case IB_CM_LAP_IDLE: 3215 break; 3216 case IB_CM_MRA_LAP_SENT: 3217 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 3218 counter[CM_LAP_COUNTER]); 3219 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); 3220 if (IS_ERR(msg)) 3221 goto unlock; 3222 3223 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 3224 CM_MSG_RESPONSE_OTHER, 3225 cm_id_priv->service_timeout, 3226 cm_id_priv->private_data, 3227 cm_id_priv->private_data_len); 3228 spin_unlock_irq(&cm_id_priv->lock); 3229 3230 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || 3231 ib_post_send_mad(msg, NULL)) 3232 cm_free_msg(msg); 3233 goto deref; 3234 case IB_CM_LAP_RCVD: 3235 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
3236 counter[CM_LAP_COUNTER]); 3237 goto unlock; 3238 default: 3239 goto unlock; 3240 } 3241 3242 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 3243 cm_id_priv->tid = lap_msg->hdr.tid; 3244 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, 3245 cm_id_priv); 3246 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3247 if (!ret) 3248 list_add_tail(&work->list, &cm_id_priv->work_list); 3249 spin_unlock_irq(&cm_id_priv->lock); 3250 3251 if (ret) 3252 cm_process_work(cm_id_priv, work); 3253 else 3254 cm_deref_id(cm_id_priv); 3255 return 0; 3256 3257 unlock: spin_unlock_irq(&cm_id_priv->lock); 3258 deref: cm_deref_id(cm_id_priv); 3259 return -EINVAL; 3260 } 3261 3262 static void cm_format_apr(struct cm_apr_msg *apr_msg, 3263 struct cm_id_private *cm_id_priv, 3264 enum ib_cm_apr_status status, 3265 void *info, 3266 u8 info_length, 3267 const void *private_data, 3268 u8 private_data_len) 3269 { 3270 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 3271 apr_msg->local_comm_id = cm_id_priv->id.local_id; 3272 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 3273 apr_msg->ap_status = (u8) status; 3274 3275 if (info && info_length) { 3276 apr_msg->info_length = info_length; 3277 memcpy(apr_msg->info, info, info_length); 3278 } 3279 3280 if (private_data && private_data_len) 3281 memcpy(apr_msg->private_data, private_data, private_data_len); 3282 } 3283 3284 int ib_send_cm_apr(struct ib_cm_id *cm_id, 3285 enum ib_cm_apr_status status, 3286 void *info, 3287 u8 info_length, 3288 const void *private_data, 3289 u8 private_data_len) 3290 { 3291 struct cm_id_private *cm_id_priv; 3292 struct ib_mad_send_buf *msg; 3293 unsigned long flags; 3294 int ret; 3295 3296 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 3297 (info && info_length > IB_CM_APR_INFO_LENGTH)) 3298 return -EINVAL; 3299 3300 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3301 spin_lock_irqsave(&cm_id_priv->lock, flags); 3302 if (cm_id->state != IB_CM_ESTABLISHED || 3303 (cm_id->lap_state != IB_CM_LAP_RCVD && 3304 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 3305 ret = -EINVAL; 3306 goto out; 3307 } 3308 3309 ret = cm_alloc_msg(cm_id_priv, &msg); 3310 if (ret) 3311 goto out; 3312 3313 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 3314 info, info_length, private_data, private_data_len); 3315 ret = ib_post_send_mad(msg, NULL); 3316 if (ret) { 3317 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3318 cm_free_msg(msg); 3319 return ret; 3320 } 3321 3322 cm_id->lap_state = IB_CM_LAP_IDLE; 3323 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3324 return ret; 3325 } 3326 EXPORT_SYMBOL(ib_send_cm_apr); 3327 3328 static int cm_apr_handler(struct cm_work *work) 3329 { 3330 struct cm_id_private *cm_id_priv; 3331 struct cm_apr_msg *apr_msg; 3332 int ret; 3333 3334 /* Currently Alternate path messages are not supported for 3335 * RoCE link layer. 3336 */ 3337 if (rdma_protocol_roce(work->port->cm_dev->ib_device, 3338 work->port->port_num)) 3339 return -EINVAL; 3340 3341 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 3342 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 3343 apr_msg->local_comm_id); 3344 if (!cm_id_priv) 3345 return -EINVAL; /* Unmatched reply. 
*/ 3346 3347 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 3348 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 3349 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 3350 work->cm_event.private_data = &apr_msg->private_data; 3351 3352 spin_lock_irq(&cm_id_priv->lock); 3353 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 3354 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 3355 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 3356 spin_unlock_irq(&cm_id_priv->lock); 3357 goto out; 3358 } 3359 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 3360 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 3361 cm_id_priv->msg = NULL; 3362 3363 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3364 if (!ret) 3365 list_add_tail(&work->list, &cm_id_priv->work_list); 3366 spin_unlock_irq(&cm_id_priv->lock); 3367 3368 if (ret) 3369 cm_process_work(cm_id_priv, work); 3370 else 3371 cm_deref_id(cm_id_priv); 3372 return 0; 3373 out: 3374 cm_deref_id(cm_id_priv); 3375 return -EINVAL; 3376 } 3377 3378 static int cm_timewait_handler(struct cm_work *work) 3379 { 3380 struct cm_timewait_info *timewait_info; 3381 struct cm_id_private *cm_id_priv; 3382 int ret; 3383 3384 timewait_info = (struct cm_timewait_info *)work; 3385 spin_lock_irq(&cm.lock); 3386 list_del(&timewait_info->list); 3387 spin_unlock_irq(&cm.lock); 3388 3389 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 3390 timewait_info->work.remote_id); 3391 if (!cm_id_priv) 3392 return -EINVAL; 3393 3394 spin_lock_irq(&cm_id_priv->lock); 3395 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 3396 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 3397 spin_unlock_irq(&cm_id_priv->lock); 3398 goto out; 3399 } 3400 cm_id_priv->id.state = IB_CM_IDLE; 3401 ret = atomic_inc_and_test(&cm_id_priv->work_count); 3402 if (!ret) 3403 list_add_tail(&work->list, &cm_id_priv->work_list); 3404 spin_unlock_irq(&cm_id_priv->lock); 3405 3406 if (ret) 3407 cm_process_work(cm_id_priv, work); 3408 else 3409 cm_deref_id(cm_id_priv); 3410 return 0; 3411 out: 3412 cm_deref_id(cm_id_priv); 3413 return -EINVAL; 3414 } 3415 3416 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 3417 struct cm_id_private *cm_id_priv, 3418 struct ib_cm_sidr_req_param *param) 3419 { 3420 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 3421 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 3422 sidr_req_msg->request_id = cm_id_priv->id.local_id; 3423 sidr_req_msg->pkey = param->path->pkey; 3424 sidr_req_msg->service_id = param->service_id; 3425 3426 if (param->private_data && param->private_data_len) 3427 memcpy(sidr_req_msg->private_data, param->private_data, 3428 param->private_data_len); 3429 } 3430 3431 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 3432 struct ib_cm_sidr_req_param *param) 3433 { 3434 struct cm_id_private *cm_id_priv; 3435 struct ib_mad_send_buf *msg; 3436 unsigned long flags; 3437 int ret; 3438 3439 if (!param->path || (param->private_data && 3440 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 3441 return -EINVAL; 3442 3443 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3444 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); 3445 if (ret) 3446 goto out; 3447 3448 cm_id->service_id = param->service_id; 3449 cm_id->service_mask = ~cpu_to_be64(0); 3450 cm_id_priv->timeout_ms = param->timeout_ms; 3451 cm_id_priv->max_cm_retries = param->max_cm_retries; 3452 ret = cm_alloc_msg(cm_id_priv, &msg); 3453 if (ret) 3454 goto out; 3455 3456 
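	/* Build the SIDR REQ now; it is only posted if the ID is still
	 * IB_CM_IDLE, which is checked under the lock below.
	 */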
cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 3457 param); 3458 msg->timeout_ms = cm_id_priv->timeout_ms; 3459 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 3460 3461 spin_lock_irqsave(&cm_id_priv->lock, flags); 3462 if (cm_id->state == IB_CM_IDLE) 3463 ret = ib_post_send_mad(msg, NULL); 3464 else 3465 ret = -EINVAL; 3466 3467 if (ret) { 3468 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3469 cm_free_msg(msg); 3470 goto out; 3471 } 3472 cm_id->state = IB_CM_SIDR_REQ_SENT; 3473 cm_id_priv->msg = msg; 3474 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3475 out: 3476 return ret; 3477 } 3478 EXPORT_SYMBOL(ib_send_cm_sidr_req); 3479 3480 static void cm_format_sidr_req_event(struct cm_work *work, 3481 struct ib_cm_id *listen_id) 3482 { 3483 struct cm_sidr_req_msg *sidr_req_msg; 3484 struct ib_cm_sidr_req_event_param *param; 3485 3486 sidr_req_msg = (struct cm_sidr_req_msg *) 3487 work->mad_recv_wc->recv_buf.mad; 3488 param = &work->cm_event.param.sidr_req_rcvd; 3489 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 3490 param->listen_id = listen_id; 3491 param->service_id = sidr_req_msg->service_id; 3492 param->bth_pkey = cm_get_bth_pkey(work); 3493 param->port = work->port->port_num; 3494 work->cm_event.private_data = &sidr_req_msg->private_data; 3495 } 3496 3497 static int cm_sidr_req_handler(struct cm_work *work) 3498 { 3499 struct ib_cm_id *cm_id; 3500 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 3501 struct cm_sidr_req_msg *sidr_req_msg; 3502 struct ib_wc *wc; 3503 int ret; 3504 3505 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); 3506 if (IS_ERR(cm_id)) 3507 return PTR_ERR(cm_id); 3508 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3509 3510 /* Record SGID/SLID and request ID for lookup. */ 3511 sidr_req_msg = (struct cm_sidr_req_msg *) 3512 work->mad_recv_wc->recv_buf.mad; 3513 wc = work->mad_recv_wc->wc; 3514 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 3515 cm_id_priv->av.dgid.global.interface_id = 0; 3516 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 3517 work->mad_recv_wc->recv_buf.grh, 3518 &cm_id_priv->av); 3519 if (ret) 3520 goto out; 3521 3522 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 3523 cm_id_priv->tid = sidr_req_msg->hdr.tid; 3524 atomic_inc(&cm_id_priv->work_count); 3525 3526 spin_lock_irq(&cm.lock); 3527 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 3528 if (cur_cm_id_priv) { 3529 spin_unlock_irq(&cm.lock); 3530 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 3531 counter[CM_SIDR_REQ_COUNTER]); 3532 goto out; /* Duplicate message. */ 3533 } 3534 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 3535 cur_cm_id_priv = cm_find_listen(cm_id->device, 3536 sidr_req_msg->service_id); 3537 if (!cur_cm_id_priv) { 3538 spin_unlock_irq(&cm.lock); 3539 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); 3540 goto out; /* No match. 
*/ 3541 } 3542 atomic_inc(&cur_cm_id_priv->refcount); 3543 atomic_inc(&cm_id_priv->refcount); 3544 spin_unlock_irq(&cm.lock); 3545 3546 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 3547 cm_id_priv->id.context = cur_cm_id_priv->id.context; 3548 cm_id_priv->id.service_id = sidr_req_msg->service_id; 3549 cm_id_priv->id.service_mask = ~cpu_to_be64(0); 3550 3551 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 3552 cm_process_work(cm_id_priv, work); 3553 cm_deref_id(cur_cm_id_priv); 3554 return 0; 3555 out: 3556 ib_destroy_cm_id(&cm_id_priv->id); 3557 return -EINVAL; 3558 } 3559 3560 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 3561 struct cm_id_private *cm_id_priv, 3562 struct ib_cm_sidr_rep_param *param) 3563 { 3564 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 3565 cm_id_priv->tid); 3566 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 3567 sidr_rep_msg->status = param->status; 3568 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 3569 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 3570 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 3571 3572 if (param->info && param->info_length) 3573 memcpy(sidr_rep_msg->info, param->info, param->info_length); 3574 3575 if (param->private_data && param->private_data_len) 3576 memcpy(sidr_rep_msg->private_data, param->private_data, 3577 param->private_data_len); 3578 } 3579 3580 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 3581 struct ib_cm_sidr_rep_param *param) 3582 { 3583 struct cm_id_private *cm_id_priv; 3584 struct ib_mad_send_buf *msg; 3585 unsigned long flags; 3586 int ret; 3587 3588 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 3589 (param->private_data && 3590 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 3591 return -EINVAL; 3592 3593 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3594 spin_lock_irqsave(&cm_id_priv->lock, flags); 3595 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 3596 ret = -EINVAL; 3597 goto error; 3598 } 3599 3600 ret = cm_alloc_msg(cm_id_priv, &msg); 3601 if (ret) 3602 goto error; 3603 3604 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 3605 param); 3606 ret = ib_post_send_mad(msg, NULL); 3607 if (ret) { 3608 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3609 cm_free_msg(msg); 3610 return ret; 3611 } 3612 cm_id->state = IB_CM_IDLE; 3613 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3614 3615 spin_lock_irqsave(&cm.lock, flags); 3616 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3617 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3618 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3619 } 3620 spin_unlock_irqrestore(&cm.lock, flags); 3621 return 0; 3622 3623 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3624 return ret; 3625 } 3626 EXPORT_SYMBOL(ib_send_cm_sidr_rep); 3627 3628 static void cm_format_sidr_rep_event(struct cm_work *work) 3629 { 3630 struct cm_sidr_rep_msg *sidr_rep_msg; 3631 struct ib_cm_sidr_rep_event_param *param; 3632 3633 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3634 work->mad_recv_wc->recv_buf.mad; 3635 param = &work->cm_event.param.sidr_rep_rcvd; 3636 param->status = sidr_rep_msg->status; 3637 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 3638 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 3639 param->info = &sidr_rep_msg->info; 3640 param->info_len = sidr_rep_msg->info_length; 3641 work->cm_event.private_data = &sidr_rep_msg->private_data; 3642 } 3643 3644 static int cm_sidr_rep_handler(struct cm_work *work) 3645 
{ 3646 struct cm_sidr_rep_msg *sidr_rep_msg; 3647 struct cm_id_private *cm_id_priv; 3648 3649 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3650 work->mad_recv_wc->recv_buf.mad; 3651 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 3652 if (!cm_id_priv) 3653 return -EINVAL; /* Unmatched reply. */ 3654 3655 spin_lock_irq(&cm_id_priv->lock); 3656 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 3657 spin_unlock_irq(&cm_id_priv->lock); 3658 goto out; 3659 } 3660 cm_id_priv->id.state = IB_CM_IDLE; 3661 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 3662 spin_unlock_irq(&cm_id_priv->lock); 3663 3664 cm_format_sidr_rep_event(work); 3665 cm_process_work(cm_id_priv, work); 3666 return 0; 3667 out: 3668 cm_deref_id(cm_id_priv); 3669 return -EINVAL; 3670 } 3671 3672 static void cm_process_send_error(struct ib_mad_send_buf *msg, 3673 enum ib_wc_status wc_status) 3674 { 3675 struct cm_id_private *cm_id_priv; 3676 struct ib_cm_event cm_event; 3677 enum ib_cm_state state; 3678 int ret; 3679 3680 memset(&cm_event, 0, sizeof cm_event); 3681 cm_id_priv = msg->context[0]; 3682 3683 /* Discard old sends or ones without a response. */ 3684 spin_lock_irq(&cm_id_priv->lock); 3685 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 3686 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 3687 goto discard; 3688 3689 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n", 3690 state, ib_wc_status_msg(wc_status)); 3691 switch (state) { 3692 case IB_CM_REQ_SENT: 3693 case IB_CM_MRA_REQ_RCVD: 3694 cm_reset_to_idle(cm_id_priv); 3695 cm_event.event = IB_CM_REQ_ERROR; 3696 break; 3697 case IB_CM_REP_SENT: 3698 case IB_CM_MRA_REP_RCVD: 3699 cm_reset_to_idle(cm_id_priv); 3700 cm_event.event = IB_CM_REP_ERROR; 3701 break; 3702 case IB_CM_DREQ_SENT: 3703 cm_enter_timewait(cm_id_priv); 3704 cm_event.event = IB_CM_DREQ_ERROR; 3705 break; 3706 case IB_CM_SIDR_REQ_SENT: 3707 cm_id_priv->id.state = IB_CM_IDLE; 3708 cm_event.event = IB_CM_SIDR_REQ_ERROR; 3709 break; 3710 default: 3711 goto discard; 3712 } 3713 spin_unlock_irq(&cm_id_priv->lock); 3714 cm_event.param.send_status = wc_status; 3715 3716 /* No other events can occur on the cm_id at this point. */ 3717 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 3718 cm_free_msg(msg); 3719 if (ret) 3720 ib_destroy_cm_id(&cm_id_priv->id); 3721 return; 3722 discard: 3723 spin_unlock_irq(&cm_id_priv->lock); 3724 cm_free_msg(msg); 3725 } 3726 3727 static void cm_send_handler(struct ib_mad_agent *mad_agent, 3728 struct ib_mad_send_wc *mad_send_wc) 3729 { 3730 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 3731 struct cm_port *port; 3732 u16 attr_index; 3733 3734 port = mad_agent->context; 3735 attr_index = be16_to_cpu(((struct ib_mad_hdr *) 3736 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; 3737 3738 /* 3739 * If the send was in response to a received message (context[0] is not 3740 * set to a cm_id), and is not a REJ, then it is a send that was 3741 * manually retried. 3742 */ 3743 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) 3744 msg->retries = 1; 3745 3746 atomic_long_add(1 + msg->retries, 3747 &port->counter_group[CM_XMIT].counter[attr_index]); 3748 if (msg->retries) 3749 atomic_long_add(msg->retries, 3750 &port->counter_group[CM_XMIT_RETRIES]. 
3751 counter[attr_index]); 3752 3753 switch (mad_send_wc->status) { 3754 case IB_WC_SUCCESS: 3755 case IB_WC_WR_FLUSH_ERR: 3756 cm_free_msg(msg); 3757 break; 3758 default: 3759 if (msg->context[0] && msg->context[1]) 3760 cm_process_send_error(msg, mad_send_wc->status); 3761 else 3762 cm_free_msg(msg); 3763 break; 3764 } 3765 } 3766 3767 static void cm_work_handler(struct work_struct *_work) 3768 { 3769 struct cm_work *work = container_of(_work, struct cm_work, work.work); 3770 int ret; 3771 3772 switch (work->cm_event.event) { 3773 case IB_CM_REQ_RECEIVED: 3774 ret = cm_req_handler(work); 3775 break; 3776 case IB_CM_MRA_RECEIVED: 3777 ret = cm_mra_handler(work); 3778 break; 3779 case IB_CM_REJ_RECEIVED: 3780 ret = cm_rej_handler(work); 3781 break; 3782 case IB_CM_REP_RECEIVED: 3783 ret = cm_rep_handler(work); 3784 break; 3785 case IB_CM_RTU_RECEIVED: 3786 ret = cm_rtu_handler(work); 3787 break; 3788 case IB_CM_USER_ESTABLISHED: 3789 ret = cm_establish_handler(work); 3790 break; 3791 case IB_CM_DREQ_RECEIVED: 3792 ret = cm_dreq_handler(work); 3793 break; 3794 case IB_CM_DREP_RECEIVED: 3795 ret = cm_drep_handler(work); 3796 break; 3797 case IB_CM_SIDR_REQ_RECEIVED: 3798 ret = cm_sidr_req_handler(work); 3799 break; 3800 case IB_CM_SIDR_REP_RECEIVED: 3801 ret = cm_sidr_rep_handler(work); 3802 break; 3803 case IB_CM_LAP_RECEIVED: 3804 ret = cm_lap_handler(work); 3805 break; 3806 case IB_CM_APR_RECEIVED: 3807 ret = cm_apr_handler(work); 3808 break; 3809 case IB_CM_TIMEWAIT_EXIT: 3810 ret = cm_timewait_handler(work); 3811 break; 3812 default: 3813 pr_debug("cm_event.event: 0x%x\n", work->cm_event.event); 3814 ret = -EINVAL; 3815 break; 3816 } 3817 if (ret) 3818 cm_free_work(work); 3819 } 3820 3821 static int cm_establish(struct ib_cm_id *cm_id) 3822 { 3823 struct cm_id_private *cm_id_priv; 3824 struct cm_work *work; 3825 unsigned long flags; 3826 int ret = 0; 3827 struct cm_device *cm_dev; 3828 3829 cm_dev = ib_get_client_data(cm_id->device, &cm_client); 3830 if (!cm_dev) 3831 return -ENODEV; 3832 3833 work = kmalloc(sizeof *work, GFP_ATOMIC); 3834 if (!work) 3835 return -ENOMEM; 3836 3837 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3838 spin_lock_irqsave(&cm_id_priv->lock, flags); 3839 switch (cm_id->state) 3840 { 3841 case IB_CM_REP_SENT: 3842 case IB_CM_MRA_REP_RCVD: 3843 cm_id->state = IB_CM_ESTABLISHED; 3844 break; 3845 case IB_CM_ESTABLISHED: 3846 ret = -EISCONN; 3847 break; 3848 default: 3849 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, 3850 be32_to_cpu(cm_id->local_id), cm_id->state); 3851 ret = -EINVAL; 3852 break; 3853 } 3854 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3855 3856 if (ret) { 3857 kfree(work); 3858 goto out; 3859 } 3860 3861 /* 3862 * The CM worker thread may try to destroy the cm_id before it 3863 * can execute this work item. To prevent potential deadlock, 3864 * we need to find the cm_id once we're in the context of the 3865 * worker thread, rather than holding a reference on it. 
3866 */ 3867 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3868 work->local_id = cm_id->local_id; 3869 work->remote_id = cm_id->remote_id; 3870 work->mad_recv_wc = NULL; 3871 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3872 3873 /* Check if the device started its remove_one */ 3874 spin_lock_irqsave(&cm.lock, flags); 3875 if (!cm_dev->going_down) { 3876 queue_delayed_work(cm.wq, &work->work, 0); 3877 } else { 3878 kfree(work); 3879 ret = -ENODEV; 3880 } 3881 spin_unlock_irqrestore(&cm.lock, flags); 3882 3883 out: 3884 return ret; 3885 } 3886 3887 static int cm_migrate(struct ib_cm_id *cm_id) 3888 { 3889 struct cm_id_private *cm_id_priv; 3890 struct cm_av tmp_av; 3891 unsigned long flags; 3892 int tmp_send_port_not_ready; 3893 int ret = 0; 3894 3895 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3896 spin_lock_irqsave(&cm_id_priv->lock, flags); 3897 if (cm_id->state == IB_CM_ESTABLISHED && 3898 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3899 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3900 cm_id->lap_state = IB_CM_LAP_IDLE; 3901 /* Swap address vector */ 3902 tmp_av = cm_id_priv->av; 3903 cm_id_priv->av = cm_id_priv->alt_av; 3904 cm_id_priv->alt_av = tmp_av; 3905 /* Swap port send ready state */ 3906 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; 3907 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; 3908 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; 3909 } else 3910 ret = -EINVAL; 3911 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3912 3913 return ret; 3914 } 3915 3916 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) 3917 { 3918 int ret; 3919 3920 switch (event) { 3921 case IB_EVENT_COMM_EST: 3922 ret = cm_establish(cm_id); 3923 break; 3924 case IB_EVENT_PATH_MIG: 3925 ret = cm_migrate(cm_id); 3926 break; 3927 default: 3928 ret = -EINVAL; 3929 } 3930 return ret; 3931 } 3932 EXPORT_SYMBOL(ib_cm_notify); 3933 3934 static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3935 struct ib_mad_send_buf *send_buf, 3936 struct ib_mad_recv_wc *mad_recv_wc) 3937 { 3938 struct cm_port *port = mad_agent->context; 3939 struct cm_work *work; 3940 enum ib_cm_event_type event; 3941 bool alt_path = false; 3942 u16 attr_id; 3943 int paths = 0; 3944 int going_down = 0; 3945 3946 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3947 case CM_REQ_ATTR_ID: 3948 alt_path = cm_req_has_alt_path((struct cm_req_msg *) 3949 mad_recv_wc->recv_buf.mad); 3950 paths = 1 + (alt_path != 0); 3951 event = IB_CM_REQ_RECEIVED; 3952 break; 3953 case CM_MRA_ATTR_ID: 3954 event = IB_CM_MRA_RECEIVED; 3955 break; 3956 case CM_REJ_ATTR_ID: 3957 event = IB_CM_REJ_RECEIVED; 3958 break; 3959 case CM_REP_ATTR_ID: 3960 event = IB_CM_REP_RECEIVED; 3961 break; 3962 case CM_RTU_ATTR_ID: 3963 event = IB_CM_RTU_RECEIVED; 3964 break; 3965 case CM_DREQ_ATTR_ID: 3966 event = IB_CM_DREQ_RECEIVED; 3967 break; 3968 case CM_DREP_ATTR_ID: 3969 event = IB_CM_DREP_RECEIVED; 3970 break; 3971 case CM_SIDR_REQ_ATTR_ID: 3972 event = IB_CM_SIDR_REQ_RECEIVED; 3973 break; 3974 case CM_SIDR_REP_ATTR_ID: 3975 event = IB_CM_SIDR_REP_RECEIVED; 3976 break; 3977 case CM_LAP_ATTR_ID: 3978 paths = 1; 3979 event = IB_CM_LAP_RECEIVED; 3980 break; 3981 case CM_APR_ATTR_ID: 3982 event = IB_CM_APR_RECEIVED; 3983 break; 3984 default: 3985 ib_free_recv_mad(mad_recv_wc); 3986 return; 3987 } 3988 3989 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); 3990 atomic_long_inc(&port->counter_group[CM_RECV]. 
3991 counter[attr_id - CM_ATTR_ID_OFFSET]); 3992 3993 work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths, 3994 GFP_KERNEL); 3995 if (!work) { 3996 ib_free_recv_mad(mad_recv_wc); 3997 return; 3998 } 3999 4000 INIT_DELAYED_WORK(&work->work, cm_work_handler); 4001 work->cm_event.event = event; 4002 work->mad_recv_wc = mad_recv_wc; 4003 work->port = port; 4004 4005 /* Check if the device started its remove_one */ 4006 spin_lock_irq(&cm.lock); 4007 if (!port->cm_dev->going_down) 4008 queue_delayed_work(cm.wq, &work->work, 0); 4009 else 4010 going_down = 1; 4011 spin_unlock_irq(&cm.lock); 4012 4013 if (going_down) { 4014 kfree(work); 4015 ib_free_recv_mad(mad_recv_wc); 4016 } 4017 } 4018 4019 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 4020 struct ib_qp_attr *qp_attr, 4021 int *qp_attr_mask) 4022 { 4023 unsigned long flags; 4024 int ret; 4025 4026 spin_lock_irqsave(&cm_id_priv->lock, flags); 4027 switch (cm_id_priv->id.state) { 4028 case IB_CM_REQ_SENT: 4029 case IB_CM_MRA_REQ_RCVD: 4030 case IB_CM_REQ_RCVD: 4031 case IB_CM_MRA_REQ_SENT: 4032 case IB_CM_REP_RCVD: 4033 case IB_CM_MRA_REP_SENT: 4034 case IB_CM_REP_SENT: 4035 case IB_CM_MRA_REP_RCVD: 4036 case IB_CM_ESTABLISHED: 4037 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 4038 IB_QP_PKEY_INDEX | IB_QP_PORT; 4039 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; 4040 if (cm_id_priv->responder_resources) 4041 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 4042 IB_ACCESS_REMOTE_ATOMIC; 4043 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 4044 qp_attr->port_num = cm_id_priv->av.port->port_num; 4045 ret = 0; 4046 break; 4047 default: 4048 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 4049 __func__, be32_to_cpu(cm_id_priv->id.local_id), 4050 cm_id_priv->id.state); 4051 ret = -EINVAL; 4052 break; 4053 } 4054 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4055 return ret; 4056 } 4057 4058 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 4059 struct ib_qp_attr *qp_attr, 4060 int *qp_attr_mask) 4061 { 4062 unsigned long flags; 4063 int ret; 4064 4065 spin_lock_irqsave(&cm_id_priv->lock, flags); 4066 switch (cm_id_priv->id.state) { 4067 case IB_CM_REQ_RCVD: 4068 case IB_CM_MRA_REQ_SENT: 4069 case IB_CM_REP_RCVD: 4070 case IB_CM_MRA_REP_SENT: 4071 case IB_CM_REP_SENT: 4072 case IB_CM_MRA_REP_RCVD: 4073 case IB_CM_ESTABLISHED: 4074 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 4075 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 4076 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 4077 qp_attr->path_mtu = cm_id_priv->path_mtu; 4078 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 4079 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 4080 if (cm_id_priv->qp_type == IB_QPT_RC || 4081 cm_id_priv->qp_type == IB_QPT_XRC_TGT) { 4082 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 4083 IB_QP_MIN_RNR_TIMER; 4084 qp_attr->max_dest_rd_atomic = 4085 cm_id_priv->responder_resources; 4086 qp_attr->min_rnr_timer = 0; 4087 } 4088 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) { 4089 *qp_attr_mask |= IB_QP_ALT_PATH; 4090 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 4091 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 4092 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; 4093 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 4094 } 4095 ret = 0; 4096 break; 4097 default: 4098 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 4099 __func__, be32_to_cpu(cm_id_priv->id.local_id), 4100 cm_id_priv->id.state); 4101 ret = -EINVAL; 4102 break; 4103 } 4104 
spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4105 return ret; 4106 } 4107 4108 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 4109 struct ib_qp_attr *qp_attr, 4110 int *qp_attr_mask) 4111 { 4112 unsigned long flags; 4113 int ret; 4114 4115 spin_lock_irqsave(&cm_id_priv->lock, flags); 4116 switch (cm_id_priv->id.state) { 4117 /* Allow transition to RTS before sending REP */ 4118 case IB_CM_REQ_RCVD: 4119 case IB_CM_MRA_REQ_SENT: 4120 4121 case IB_CM_REP_RCVD: 4122 case IB_CM_MRA_REP_SENT: 4123 case IB_CM_REP_SENT: 4124 case IB_CM_MRA_REP_RCVD: 4125 case IB_CM_ESTABLISHED: 4126 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { 4127 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 4128 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 4129 switch (cm_id_priv->qp_type) { 4130 case IB_QPT_RC: 4131 case IB_QPT_XRC_INI: 4132 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | 4133 IB_QP_MAX_QP_RD_ATOMIC; 4134 qp_attr->retry_cnt = cm_id_priv->retry_count; 4135 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 4136 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; 4137 /* fall through */ 4138 case IB_QPT_XRC_TGT: 4139 *qp_attr_mask |= IB_QP_TIMEOUT; 4140 qp_attr->timeout = cm_id_priv->av.timeout; 4141 break; 4142 default: 4143 break; 4144 } 4145 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) { 4146 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 4147 qp_attr->path_mig_state = IB_MIG_REARM; 4148 } 4149 } else { 4150 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; 4151 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 4152 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 4153 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; 4154 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 4155 qp_attr->path_mig_state = IB_MIG_REARM; 4156 } 4157 ret = 0; 4158 break; 4159 default: 4160 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", 4161 __func__, be32_to_cpu(cm_id_priv->id.local_id), 4162 cm_id_priv->id.state); 4163 ret = -EINVAL; 4164 break; 4165 } 4166 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4167 return ret; 4168 } 4169 4170 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, 4171 struct ib_qp_attr *qp_attr, 4172 int *qp_attr_mask) 4173 { 4174 struct cm_id_private *cm_id_priv; 4175 int ret; 4176 4177 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 4178 switch (qp_attr->qp_state) { 4179 case IB_QPS_INIT: 4180 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); 4181 break; 4182 case IB_QPS_RTR: 4183 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); 4184 break; 4185 case IB_QPS_RTS: 4186 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); 4187 break; 4188 default: 4189 ret = -EINVAL; 4190 break; 4191 } 4192 return ret; 4193 } 4194 EXPORT_SYMBOL(ib_cm_init_qp_attr); 4195 4196 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, 4197 char *buf) 4198 { 4199 struct cm_counter_group *group; 4200 struct cm_counter_attribute *cm_attr; 4201 4202 group = container_of(obj, struct cm_counter_group, obj); 4203 cm_attr = container_of(attr, struct cm_counter_attribute, attr); 4204 4205 return sprintf(buf, "%ld\n", 4206 atomic_long_read(&group->counter[cm_attr->index])); 4207 } 4208 4209 static const struct sysfs_ops cm_counter_ops = { 4210 .show = cm_show_counter 4211 }; 4212 4213 static struct kobj_type cm_counter_obj_type = { 4214 .sysfs_ops = &cm_counter_ops, 4215 .default_attrs = cm_counter_default_attrs 4216 }; 4217 4218 static void cm_release_port_obj(struct kobject *obj) 4219 { 4220 struct 
cm_port *cm_port; 4221 4222 cm_port = container_of(obj, struct cm_port, port_obj); 4223 kfree(cm_port); 4224 } 4225 4226 static struct kobj_type cm_port_obj_type = { 4227 .release = cm_release_port_obj 4228 }; 4229 4230 static char *cm_devnode(struct device *dev, umode_t *mode) 4231 { 4232 if (mode) 4233 *mode = 0666; 4234 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); 4235 } 4236 4237 struct class cm_class = { 4238 .owner = THIS_MODULE, 4239 .name = "infiniband_cm", 4240 .devnode = cm_devnode, 4241 }; 4242 EXPORT_SYMBOL(cm_class); 4243 4244 static int cm_create_port_fs(struct cm_port *port) 4245 { 4246 int i, ret; 4247 4248 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, 4249 &port->cm_dev->device->kobj, 4250 "%d", port->port_num); 4251 if (ret) { 4252 kfree(port); 4253 return ret; 4254 } 4255 4256 for (i = 0; i < CM_COUNTER_GROUPS; i++) { 4257 ret = kobject_init_and_add(&port->counter_group[i].obj, 4258 &cm_counter_obj_type, 4259 &port->port_obj, 4260 "%s", counter_group_names[i]); 4261 if (ret) 4262 goto error; 4263 } 4264 4265 return 0; 4266 4267 error: 4268 while (i--) 4269 kobject_put(&port->counter_group[i].obj); 4270 kobject_put(&port->port_obj); 4271 return ret; 4272 4273 } 4274 4275 static void cm_remove_port_fs(struct cm_port *port) 4276 { 4277 int i; 4278 4279 for (i = 0; i < CM_COUNTER_GROUPS; i++) 4280 kobject_put(&port->counter_group[i].obj); 4281 4282 kobject_put(&port->port_obj); 4283 } 4284 4285 static void cm_add_one(struct ib_device *ib_device) 4286 { 4287 struct cm_device *cm_dev; 4288 struct cm_port *port; 4289 struct ib_mad_reg_req reg_req = { 4290 .mgmt_class = IB_MGMT_CLASS_CM, 4291 .mgmt_class_version = IB_CM_CLASS_VERSION, 4292 }; 4293 struct ib_port_modify port_modify = { 4294 .set_port_cap_mask = IB_PORT_CM_SUP 4295 }; 4296 unsigned long flags; 4297 int ret; 4298 int count = 0; 4299 u8 i; 4300 4301 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * 4302 ib_device->phys_port_cnt, GFP_KERNEL); 4303 if (!cm_dev) 4304 return; 4305 4306 cm_dev->ib_device = ib_device; 4307 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; 4308 cm_dev->going_down = 0; 4309 cm_dev->device = device_create(&cm_class, &ib_device->dev, 4310 MKDEV(0, 0), NULL, 4311 "%s", ib_device->name); 4312 if (IS_ERR(cm_dev->device)) { 4313 kfree(cm_dev); 4314 return; 4315 } 4316 4317 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 4318 for (i = 1; i <= ib_device->phys_port_cnt; i++) { 4319 if (!rdma_cap_ib_cm(ib_device, i)) 4320 continue; 4321 4322 port = kzalloc(sizeof *port, GFP_KERNEL); 4323 if (!port) 4324 goto error1; 4325 4326 cm_dev->port[i-1] = port; 4327 port->cm_dev = cm_dev; 4328 port->port_num = i; 4329 4330 INIT_LIST_HEAD(&port->cm_priv_prim_list); 4331 INIT_LIST_HEAD(&port->cm_priv_altr_list); 4332 4333 ret = cm_create_port_fs(port); 4334 if (ret) 4335 goto error1; 4336 4337 port->mad_agent = ib_register_mad_agent(ib_device, i, 4338 IB_QPT_GSI, 4339 &reg_req, 4340 0, 4341 cm_send_handler, 4342 cm_recv_handler, 4343 port, 4344 0); 4345 if (IS_ERR(port->mad_agent)) 4346 goto error2; 4347 4348 ret = ib_modify_port(ib_device, i, 0, &port_modify); 4349 if (ret) 4350 goto error3; 4351 4352 count++; 4353 } 4354 4355 if (!count) 4356 goto free; 4357 4358 ib_set_client_data(ib_device, &cm_client, cm_dev); 4359 4360 write_lock_irqsave(&cm.device_lock, flags); 4361 list_add_tail(&cm_dev->list, &cm.device_list); 4362 write_unlock_irqrestore(&cm.device_lock, flags); 4363 return; 4364 4365 error3: 4366 ib_unregister_mad_agent(port->mad_agent); 4367 error2: 4368
cm_remove_port_fs(port); 4369 error1: 4370 port_modify.set_port_cap_mask = 0; 4371 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 4372 while (--i) { 4373 if (!rdma_cap_ib_cm(ib_device, i)) 4374 continue; 4375 4376 port = cm_dev->port[i-1]; 4377 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 4378 ib_unregister_mad_agent(port->mad_agent); 4379 cm_remove_port_fs(port); 4380 } 4381 free: 4382 device_unregister(cm_dev->device); 4383 kfree(cm_dev); 4384 } 4385 4386 static void cm_remove_one(struct ib_device *ib_device, void *client_data) 4387 { 4388 struct cm_device *cm_dev = client_data; 4389 struct cm_port *port; 4390 struct cm_id_private *cm_id_priv; 4391 struct ib_mad_agent *cur_mad_agent; 4392 struct ib_port_modify port_modify = { 4393 .clr_port_cap_mask = IB_PORT_CM_SUP 4394 }; 4395 unsigned long flags; 4396 int i; 4397 4398 if (!cm_dev) 4399 return; 4400 4401 write_lock_irqsave(&cm.device_lock, flags); 4402 list_del(&cm_dev->list); 4403 write_unlock_irqrestore(&cm.device_lock, flags); 4404 4405 spin_lock_irq(&cm.lock); 4406 cm_dev->going_down = 1; 4407 spin_unlock_irq(&cm.lock); 4408 4409 for (i = 1; i <= ib_device->phys_port_cnt; i++) { 4410 if (!rdma_cap_ib_cm(ib_device, i)) 4411 continue; 4412 4413 port = cm_dev->port[i-1]; 4414 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 4415 /* Mark all the cm_ids as not valid */ 4416 spin_lock_irq(&cm.lock); 4417 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) 4418 cm_id_priv->altr_send_port_not_ready = 1; 4419 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) 4420 cm_id_priv->prim_send_port_not_ready = 1; 4421 spin_unlock_irq(&cm.lock); 4422 /* 4423 * We flush the queue here after setting going_down; this 4424 * verifies that no new work will be queued in the recv handler, 4425 * after which it is safe to call ib_unregister_mad_agent(). 4426 */ 4427 flush_workqueue(cm.wq); 4428 spin_lock_irq(&cm.state_lock); 4429 cur_mad_agent = port->mad_agent; 4430 port->mad_agent = NULL; 4431 spin_unlock_irq(&cm.state_lock); 4432 ib_unregister_mad_agent(cur_mad_agent); 4433 cm_remove_port_fs(port); 4434 } 4435 4436 device_unregister(cm_dev->device); 4437 kfree(cm_dev); 4438 } 4439 4440 static int __init ib_cm_init(void) 4441 { 4442 int ret; 4443 4444 memset(&cm, 0, sizeof cm); 4445 INIT_LIST_HEAD(&cm.device_list); 4446 rwlock_init(&cm.device_lock); 4447 spin_lock_init(&cm.lock); 4448 spin_lock_init(&cm.state_lock); 4449 cm.listen_service_table = RB_ROOT; 4450 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 4451 cm.remote_id_table = RB_ROOT; 4452 cm.remote_qp_table = RB_ROOT; 4453 cm.remote_sidr_table = RB_ROOT; 4454 idr_init(&cm.local_id_table); 4455 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 4456 INIT_LIST_HEAD(&cm.timewait_list); 4457 4458 ret = class_register(&cm_class); 4459 if (ret) { 4460 ret = -ENOMEM; 4461 goto error1; 4462 } 4463 4464 cm.wq = alloc_workqueue("ib_cm", 0, 1); 4465 if (!cm.wq) { 4466 ret = -ENOMEM; 4467 goto error2; 4468 } 4469 4470 ret = ib_register_client(&cm_client); 4471 if (ret) 4472 goto error3; 4473 4474 return 0; 4475 error3: 4476 destroy_workqueue(cm.wq); 4477 error2: 4478 class_unregister(&cm_class); 4479 error1: 4480 idr_destroy(&cm.local_id_table); 4481 return ret; 4482 } 4483 4484 static void __exit ib_cm_cleanup(void) 4485 { 4486 struct cm_timewait_info *timewait_info, *tmp; 4487 4488 spin_lock_irq(&cm.lock); 4489 list_for_each_entry(timewait_info, &cm.timewait_list, list) 4490 cancel_delayed_work(&timewait_info->work.work);
4491 spin_unlock_irq(&cm.lock); 4492 4493 ib_unregister_client(&cm_client); 4494 destroy_workqueue(cm.wq); 4495 4496 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { 4497 list_del(&timewait_info->list); 4498 kfree(timewait_info); 4499 } 4500 4501 class_unregister(&cm_class); 4502 idr_destroy(&cm.local_id_table); 4503 } 4504 4505 module_init(ib_cm_init); 4506 module_exit(ib_cm_cleanup); 4507 4508
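/*
 * Illustrative sketch (editor's addition, not part of this module): a
 * consumer of this API typically drives its QP through INIT, RTR and RTS
 * by letting the CM fill in the attributes via ib_cm_init_qp_attr() and
 * then applying them with the standard verbs call ib_modify_qp() from
 * <rdma/ib_verbs.h>. The helper name below is hypothetical; only
 * ib_cm_init_qp_attr(), ib_cm_notify() and ib_modify_qp() are real
 * entry points.
 *
 *	static int example_cm_modify_qp(struct ib_cm_id *cm_id,
 *					struct ib_qp *qp,
 *					enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask, ret;
 *
 *		qp_attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *	}
 *
 * A passive side would usually call this with IB_QPS_INIT and IB_QPS_RTR
 * before ib_send_cm_rep(), and with IB_QPS_RTS once the RTU arrives (or
 * after ib_cm_notify(cm_id, IB_EVENT_COMM_EST) if data is received before
 * the RTU).
 */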