/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 * ANY		 -> ERROR
 * UP		 -> DISCONNECTING
 * ERROR	 -> DISCONNECTING
 * DISCONNECTING -> DOWN
 * DOWN		 -> CONNECTING
 * CONNECTING	 -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  - Inside the shutdown worker; synchronizes with xmit path
 *    through RDS_IN_XMIT, and with connection management callbacks
 *    via c_cm_lock.
 *
 *    For receive callbacks, we rely on the underlying transport
 *    (TCP, IB/RDMA) to provide the necessary synchronisation.
7000e0f34cSAndy Grover */ 7100e0f34cSAndy Grover struct workqueue_struct *rds_wq; 72616b757aSAndy Grover EXPORT_SYMBOL_GPL(rds_wq); 7300e0f34cSAndy Grover 740cb43965SSowmini Varadhan void rds_connect_path_complete(struct rds_conn_path *cp, int curr) 7500e0f34cSAndy Grover { 760cb43965SSowmini Varadhan if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) { 7700e0f34cSAndy Grover printk(KERN_WARNING "%s: Cannot transition to state UP, " 7800e0f34cSAndy Grover "current state is %d\n", 7900e0f34cSAndy Grover __func__, 800cb43965SSowmini Varadhan atomic_read(&cp->cp_state)); 810cb43965SSowmini Varadhan rds_conn_path_drop(cp); 8200e0f34cSAndy Grover return; 8300e0f34cSAndy Grover } 8400e0f34cSAndy Grover 8500e0f34cSAndy Grover rdsdebug("conn %p for %pI4 to %pI4 complete\n", 860cb43965SSowmini Varadhan cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr); 8700e0f34cSAndy Grover 880cb43965SSowmini Varadhan cp->cp_reconnect_jiffies = 0; 890cb43965SSowmini Varadhan set_bit(0, &cp->cp_conn->c_map_queued); 900cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_send_w, 0); 910cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); 9200e0f34cSAndy Grover } 939c79440eSSowmini Varadhan EXPORT_SYMBOL_GPL(rds_connect_path_complete); 949c79440eSSowmini Varadhan 959c79440eSSowmini Varadhan void rds_connect_complete(struct rds_connection *conn) 969c79440eSSowmini Varadhan { 970cb43965SSowmini Varadhan rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING); 989c79440eSSowmini Varadhan } 99616b757aSAndy Grover EXPORT_SYMBOL_GPL(rds_connect_complete); 10000e0f34cSAndy Grover 10100e0f34cSAndy Grover /* 10200e0f34cSAndy Grover * This random exponential backoff is relied on to eventually resolve racing 10300e0f34cSAndy Grover * connects. 10400e0f34cSAndy Grover * 10500e0f34cSAndy Grover * If connect attempts race then both parties drop both connections and come 10600e0f34cSAndy Grover * here to wait for a random amount of time before trying again. 
Eventually 10700e0f34cSAndy Grover * the backoff range will be so much greater than the time it takes to 10800e0f34cSAndy Grover * establish a connection that one of the pair will establish the connection 10900e0f34cSAndy Grover * before the other's random delay fires. 11000e0f34cSAndy Grover * 11100e0f34cSAndy Grover * Connection attempts that arrive while a connection is already established 11200e0f34cSAndy Grover * are also considered to be racing connects. This lets a connection from 11300e0f34cSAndy Grover * a rebooted machine replace an existing stale connection before the transport 11400e0f34cSAndy Grover * notices that the connection has failed. 11500e0f34cSAndy Grover * 11600e0f34cSAndy Grover * We should *always* start with a random backoff; otherwise a broken connection 11700e0f34cSAndy Grover * will always take several iterations to be re-established. 11800e0f34cSAndy Grover */ 1190cb43965SSowmini Varadhan void rds_queue_reconnect(struct rds_conn_path *cp) 12000e0f34cSAndy Grover { 12100e0f34cSAndy Grover unsigned long rand; 1220cb43965SSowmini Varadhan struct rds_connection *conn = cp->cp_conn; 12300e0f34cSAndy Grover 12400e0f34cSAndy Grover rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n", 12500e0f34cSAndy Grover conn, &conn->c_laddr, &conn->c_faddr, 1260cb43965SSowmini Varadhan cp->cp_reconnect_jiffies); 12700e0f34cSAndy Grover 1288315011aSSowmini Varadhan /* let peer with smaller addr initiate reconnect, to avoid duels */ 1298315011aSSowmini Varadhan if (conn->c_trans->t_type == RDS_TRANS_TCP && 1308315011aSSowmini Varadhan conn->c_laddr > conn->c_faddr) 1318315011aSSowmini Varadhan return; 1328315011aSSowmini Varadhan 1330cb43965SSowmini Varadhan set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); 1340cb43965SSowmini Varadhan if (cp->cp_reconnect_jiffies == 0) { 1350cb43965SSowmini Varadhan cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies; 1360cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); 13700e0f34cSAndy 
Grover return; 13800e0f34cSAndy Grover } 13900e0f34cSAndy Grover 14000e0f34cSAndy Grover get_random_bytes(&rand, sizeof(rand)); 14100e0f34cSAndy Grover rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n", 1420cb43965SSowmini Varadhan rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies, 14300e0f34cSAndy Grover conn, &conn->c_laddr, &conn->c_faddr); 1440cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_conn_w, 1450cb43965SSowmini Varadhan rand % cp->cp_reconnect_jiffies); 14600e0f34cSAndy Grover 1470cb43965SSowmini Varadhan cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2, 14800e0f34cSAndy Grover rds_sysctl_reconnect_max_jiffies); 14900e0f34cSAndy Grover } 15000e0f34cSAndy Grover 15100e0f34cSAndy Grover void rds_connect_worker(struct work_struct *work) 15200e0f34cSAndy Grover { 1530cb43965SSowmini Varadhan struct rds_conn_path *cp = container_of(work, 1540cb43965SSowmini Varadhan struct rds_conn_path, 1550cb43965SSowmini Varadhan cp_conn_w.work); 1560cb43965SSowmini Varadhan struct rds_connection *conn = cp->cp_conn; 15700e0f34cSAndy Grover int ret; 15800e0f34cSAndy Grover 1590cb43965SSowmini Varadhan clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); 160b04e8554SSowmini Varadhan ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING); 161b04e8554SSowmini Varadhan if (ret) { 162b04e8554SSowmini Varadhan ret = conn->c_trans->conn_path_connect(cp); 16300e0f34cSAndy Grover rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n", 16400e0f34cSAndy Grover conn, &conn->c_laddr, &conn->c_faddr, ret); 16500e0f34cSAndy Grover 16600e0f34cSAndy Grover if (ret) { 1670cb43965SSowmini Varadhan if (rds_conn_path_transition(cp, 1680cb43965SSowmini Varadhan RDS_CONN_CONNECTING, 1690cb43965SSowmini Varadhan RDS_CONN_DOWN)) 1700cb43965SSowmini Varadhan rds_queue_reconnect(cp); 17100e0f34cSAndy Grover else 172fb1b3dc4SSowmini Varadhan rds_conn_path_error(cp, 173fb1b3dc4SSowmini Varadhan "RDS: connect failed\n"); 17400e0f34cSAndy Grover } 
17500e0f34cSAndy Grover } 17600e0f34cSAndy Grover } 17700e0f34cSAndy Grover 17800e0f34cSAndy Grover void rds_send_worker(struct work_struct *work) 17900e0f34cSAndy Grover { 1800cb43965SSowmini Varadhan struct rds_conn_path *cp = container_of(work, 1810cb43965SSowmini Varadhan struct rds_conn_path, 1820cb43965SSowmini Varadhan cp_send_w.work); 18300e0f34cSAndy Grover int ret; 18400e0f34cSAndy Grover 1850cb43965SSowmini Varadhan if (rds_conn_path_state(cp) == RDS_CONN_UP) { 1860cb43965SSowmini Varadhan clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags); 1871f9ecd7eSSowmini Varadhan ret = rds_send_xmit(cp); 188db6526dcSSantosh Shilimkar cond_resched(); 1890cb43965SSowmini Varadhan rdsdebug("conn %p ret %d\n", cp->cp_conn, ret); 19000e0f34cSAndy Grover switch (ret) { 19100e0f34cSAndy Grover case -EAGAIN: 19200e0f34cSAndy Grover rds_stats_inc(s_send_immediate_retry); 1930cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_send_w, 0); 19400e0f34cSAndy Grover break; 19500e0f34cSAndy Grover case -ENOMEM: 19600e0f34cSAndy Grover rds_stats_inc(s_send_delayed_retry); 1970cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_send_w, 2); 19800e0f34cSAndy Grover default: 19900e0f34cSAndy Grover break; 20000e0f34cSAndy Grover } 20100e0f34cSAndy Grover } 20200e0f34cSAndy Grover } 20300e0f34cSAndy Grover 20400e0f34cSAndy Grover void rds_recv_worker(struct work_struct *work) 20500e0f34cSAndy Grover { 2060cb43965SSowmini Varadhan struct rds_conn_path *cp = container_of(work, 2070cb43965SSowmini Varadhan struct rds_conn_path, 2080cb43965SSowmini Varadhan cp_recv_w.work); 20900e0f34cSAndy Grover int ret; 21000e0f34cSAndy Grover 2110cb43965SSowmini Varadhan if (rds_conn_path_state(cp) == RDS_CONN_UP) { 2122da43c4aSSowmini Varadhan ret = cp->cp_conn->c_trans->recv_path(cp); 2130cb43965SSowmini Varadhan rdsdebug("conn %p ret %d\n", cp->cp_conn, ret); 21400e0f34cSAndy Grover switch (ret) { 21500e0f34cSAndy Grover case -EAGAIN: 21600e0f34cSAndy Grover 
rds_stats_inc(s_recv_immediate_retry); 2170cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); 21800e0f34cSAndy Grover break; 21900e0f34cSAndy Grover case -ENOMEM: 22000e0f34cSAndy Grover rds_stats_inc(s_recv_delayed_retry); 2210cb43965SSowmini Varadhan queue_delayed_work(rds_wq, &cp->cp_recv_w, 2); 22200e0f34cSAndy Grover default: 22300e0f34cSAndy Grover break; 22400e0f34cSAndy Grover } 22500e0f34cSAndy Grover } 22600e0f34cSAndy Grover } 22700e0f34cSAndy Grover 2282dc39357SAndy Grover void rds_shutdown_worker(struct work_struct *work) 2292dc39357SAndy Grover { 2300cb43965SSowmini Varadhan struct rds_conn_path *cp = container_of(work, 2310cb43965SSowmini Varadhan struct rds_conn_path, 2320cb43965SSowmini Varadhan cp_down_w); 2332dc39357SAndy Grover 234d769ef81SSowmini Varadhan rds_conn_shutdown(cp); 2352dc39357SAndy Grover } 2362dc39357SAndy Grover 23700e0f34cSAndy Grover void rds_threads_exit(void) 23800e0f34cSAndy Grover { 23900e0f34cSAndy Grover destroy_workqueue(rds_wq); 24000e0f34cSAndy Grover } 24100e0f34cSAndy Grover 242ef87b7eaSZach Brown int rds_threads_init(void) 24300e0f34cSAndy Grover { 24480c51be5SZach Brown rds_wq = create_singlethread_workqueue("krdsd"); 2458690bfa1SAndy Grover if (!rds_wq) 24600e0f34cSAndy Grover return -ENOMEM; 24700e0f34cSAndy Grover 24800e0f34cSAndy Grover return 0; 24900e0f34cSAndy Grover } 250