/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 * ANY           -> ERROR
 * UP            -> DISCONNECTING
 * ERROR         -> DISCONNECTING
 * DISCONNECTING -> DOWN
 * DOWN          -> CONNECTING
 * CONNECTING    -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  - Inside the shutdown worker; synchronizes with xmit path
 *    through c_send_lock, and with connection management callbacks
 *    via c_cm_lock.
 *
 *    For receive callbacks, we rely on the underlying transport
 *    (TCP, IB/RDMA) to provide the necessary synchronisation.
 */
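/*
 * The transitions above are claimed with rds_conn_transition(); the real
 * helper is defined in rds.h. A minimal sketch of how such an atomic
 * transition helper can be written (illustrative only, not a redefinition):
 *
 *	static inline int
 *	rds_conn_transition(struct rds_connection *conn, int old, int new)
 *	{
 *		return atomic_cmpxchg(&conn->c_state, old, new) == old;
 *	}
 *
 * The compare-and-swap succeeds only for the caller that actually saw the
 * expected old state, which is what lets racing workers decide who owns a
 * given transition.
 */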
struct workqueue_struct *rds_wq;

void rds_connect_complete(struct rds_connection *conn)
{
	if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
		printk(KERN_WARNING "%s: Cannot transition to state UP, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
		atomic_set(&conn->c_state, RDS_CONN_ERROR);
		queue_work(rds_wq, &conn->c_down_w);
		return;
	}

	rdsdebug("conn %p for %pI4 to %pI4 complete\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	conn->c_reconnect_jiffies = 0;
	set_bit(0, &conn->c_map_queued);
	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}

/*
 * This random exponential backoff is relied on to eventually resolve racing
 * connects.
 *
 * If connect attempts race then both parties drop both connections and come
 * here to wait for a random amount of time before trying again. Eventually
 * the backoff range will be so much greater than the time it takes to
 * establish a connection that one of the pair will establish the connection
 * before the other's random delay fires.
 *
 * Connection attempts that arrive while a connection is already established
 * are also considered to be racing connects. This lets a connection from
 * a rebooted machine replace an existing stale connection before the transport
 * notices that the connection has failed.
 *
 * We should *always* start with a random backoff; otherwise a broken
 * connection will always take several iterations to be re-established.
 */
static void rds_queue_reconnect(struct rds_connection *conn)
{
	unsigned long rand;

	rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
		 conn, &conn->c_laddr, &conn->c_faddr,
		 conn->c_reconnect_jiffies);

	set_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
	if (conn->c_reconnect_jiffies == 0) {
		conn->c_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
		return;
	}

	get_random_bytes(&rand, sizeof(rand));
	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
		 rand % conn->c_reconnect_jiffies, conn->c_reconnect_jiffies,
		 conn, &conn->c_laddr, &conn->c_faddr);
	queue_delayed_work(rds_wq, &conn->c_conn_w,
			   rand % conn->c_reconnect_jiffies);

	conn->c_reconnect_jiffies = min(conn->c_reconnect_jiffies * 2,
					rds_sysctl_reconnect_max_jiffies);
}
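/*
 * A worked example of the backoff above (the sysctl values are purely
 * illustrative): with rds_sysctl_reconnect_min_jiffies == HZ and
 * rds_sysctl_reconnect_max_jiffies == 16 * HZ, the first call queues the
 * connect work immediately and arms c_reconnect_jiffies at HZ; subsequent
 * calls pick a uniform random delay from [0, HZ), [0, 2 * HZ), [0, 4 * HZ),
 * and so on, with the ceiling clamped at [0, 16 * HZ). Two peers whose
 * connects keep colliding therefore draw from ever wider windows until one
 * of them wins the race.
 */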
void rds_connect_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_conn_w.work);
	int ret;

	clear_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
	if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		ret = conn->c_trans->conn_connect(conn);
		rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
			 conn, &conn->c_laddr, &conn->c_faddr, ret);

		if (ret) {
			if (rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_DOWN))
				rds_queue_reconnect(conn);
			else
				rds_conn_error(conn, "RDS: connect failed\n");
		}
	}
}

void rds_shutdown_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);

	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may deadlock
		 * with the CM handler. Instead, the CM event handler is
		 * supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		mutex_lock(&conn->c_send_lock);
		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);
		mutex_unlock(&conn->c_send_lock);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				       "%s: failed to transition to state DOWN, "
				       "current state is %d\n",
				       __func__,
				       atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work(&conn->c_conn_w);
	if (!hlist_unhashed(&conn->c_hash_node))
		rds_queue_reconnect(conn);
}

void rds_send_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
	int ret;

	if (rds_conn_state(conn) == RDS_CONN_UP) {
		ret = rds_send_xmit(conn);
		rdsdebug("conn %p ret %d\n", conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_send_immediate_retry);
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_send_delayed_retry);
			queue_delayed_work(rds_wq, &conn->c_send_w, 2);
			/* fall through */
		default:
			break;
		}
	}
}

void rds_recv_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_recv_w.work);
	int ret;

	if (rds_conn_state(conn) == RDS_CONN_UP) {
		ret = conn->c_trans->recv(conn);
		rdsdebug("conn %p ret %d\n", conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_recv_immediate_retry);
			queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_recv_delayed_retry);
			queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
			/* fall through */
		default:
			break;
		}
	}
}

void rds_threads_exit(void)
{
	destroy_workqueue(rds_wq);
}

int __init rds_threads_init(void)
{
	rds_wq = create_singlethread_workqueue("krdsd");
	if (rds_wq == NULL)
		return -ENOMEM;

	return 0;
}
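/*
 * All of the c_send_w, c_recv_w, c_conn_w and c_down_w items above are
 * queued on this one single-threaded krdsd workqueue, which is what
 * serializes connection management as described at the top of this file.
 *
 * A minimal sketch of how a caller would pair these up at module init/exit
 * time (the caller function names here are illustrative, not taken from
 * this file):
 *
 *	static int __init example_init(void)
 *	{
 *		int ret = rds_threads_init();
 *
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		...
 *		rds_threads_exit();
 *	}
 */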