/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}
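/*
 * Illustrative only (added comment, not part of the original file): a
 * hypothetical caller holding the RCU read lock would pair the bucket
 * and lookup helpers above roughly like this:
 *
 *	rcu_read_lock();
 *	conn = rds_conn_lookup(rds_conn_bucket(laddr, faddr),
 *			       laddr, faddr, trans);
 *	rcu_read_unlock();
 *
 * which is exactly the pattern __rds_conn_create() below uses.
 */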
/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application.  That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport.  This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
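	/*
	 * Descriptive note (added): each conn drives its own connect,
	 * send, receive and shutdown handling as work items on the
	 * shared rds_wq workqueue; see rds_conn_drop() and
	 * rds_conn_connect_if_down() below for how c_down_w and
	 * c_conn_w get queued.
	 */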
"(outgoing)" : ""); 208 209 /* 210 * Since we ran without holding the conn lock, someone could 211 * have created the same conn (either normal or passive) in the 212 * interim. We check while holding the lock. If we won, we complete 213 * init and return our conn. If we lost, we rollback and return the 214 * other one. 215 */ 216 spin_lock_irqsave(&rds_conn_lock, flags); 217 if (parent) { 218 /* Creating passive conn */ 219 if (parent->c_passive) { 220 trans->conn_free(conn->c_transport_data); 221 kmem_cache_free(rds_conn_slab, conn); 222 conn = parent->c_passive; 223 } else { 224 parent->c_passive = conn; 225 rds_cong_add_conn(conn); 226 rds_conn_count++; 227 } 228 } else { 229 /* Creating normal conn */ 230 struct rds_connection *found; 231 232 found = rds_conn_lookup(head, laddr, faddr, trans); 233 if (found) { 234 trans->conn_free(conn->c_transport_data); 235 kmem_cache_free(rds_conn_slab, conn); 236 conn = found; 237 } else { 238 hlist_add_head_rcu(&conn->c_hash_node, head); 239 rds_cong_add_conn(conn); 240 rds_conn_count++; 241 } 242 } 243 spin_unlock_irqrestore(&rds_conn_lock, flags); 244 245 out: 246 return conn; 247 } 248 249 struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, 250 struct rds_transport *trans, gfp_t gfp) 251 { 252 return __rds_conn_create(laddr, faddr, trans, gfp, 0); 253 } 254 EXPORT_SYMBOL_GPL(rds_conn_create); 255 256 struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, 257 struct rds_transport *trans, gfp_t gfp) 258 { 259 return __rds_conn_create(laddr, faddr, trans, gfp, 1); 260 } 261 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); 262 263 void rds_conn_shutdown(struct rds_connection *conn) 264 { 265 /* shut it down unless it's down already */ 266 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) { 267 /* 268 * Quiesce the connection mgmt handlers before we start tearing 269 * things down. We don't hold the mutex for the entire 270 * duration of the shutdown operation, else we may be 271 * deadlocking with the CM handler. Instead, the CM event 272 * handler is supposed to check for state DISCONNECTING 273 */ 274 mutex_lock(&conn->c_cm_lock); 275 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) 276 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) { 277 rds_conn_error(conn, "shutdown called in state %d\n", 278 atomic_read(&conn->c_state)); 279 mutex_unlock(&conn->c_cm_lock); 280 return; 281 } 282 mutex_unlock(&conn->c_cm_lock); 283 284 wait_event(conn->c_waitq, 285 !test_bit(RDS_IN_XMIT, &conn->c_flags)); 286 287 conn->c_trans->conn_shutdown(conn); 288 rds_conn_reset(conn); 289 290 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) { 291 /* This can happen - eg when we're in the middle of tearing 292 * down the connection, and someone unloads the rds module. 293 * Quite reproduceable with loopback connections. 294 * Mostly harmless. 295 */ 296 rds_conn_error(conn, 297 "%s: failed to transition to state DOWN, " 298 "current state is %d\n", 299 __func__, 300 atomic_read(&conn->c_state)); 301 return; 302 } 303 } 304 305 /* Then reconnect if it's still live. 306 * The passive side of an IB loopback connection is never added 307 * to the conn hash, so we never trigger a reconnect on this 308 * conn - the reconnect is always triggered by the active peer. 
/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
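/*
 * Added note: the handlers below back the RDS_INFO_* interface that
 * rds_conn_init() registers; userspace (e.g. the rds-info tool from
 * rds-tools) uses it to dump per-connection and per-message state.
 */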
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}
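/*
 * Added note: rds_conn_init()/rds_conn_exit() are paired with the
 * module init and exit paths (in af_rds.c); by the time rds_conn_exit()
 * runs, every conn is expected to have been destroyed, hence the
 * WARN_ON below.
 */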
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect.  We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}
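/*
 * Added note: callers in this file use the rds_conn_error() wrapper
 * macro from rds.h, which (in this version of the code) prepends a log
 * prefix to fmt before handing it to __rds_conn_error() above, so the
 * vprintk() here emits a tagged message and then forces a disconnect.
 */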