/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds_single_path.h"
#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

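/*
 * Hash the (laddr, faddr) pair into one of the connection hash buckets.
 * rds_hash_secret is seeded once, on first use, to perturb the hash.
 */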
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS.
	 */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp, int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;
	conn->c_path[0].cp_conn = conn;
	rds_conn_net_set(conn, net);

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_send_gen = 0;
	conn->c_path[0].cp_outgoing = (is_outgoing ? 1 : 0);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

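/*
 * Bring a connection down: quiesce the connection management handlers,
 * wait for any transmit or receive-refill still in flight to finish,
 * let the transport tear down its state, reset the connection, and then
 * queue a reconnect for the active side if the conn is still hashed.
 */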
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
		    !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
		wait_event(conn->c_waitq,
			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				       "%s: failed to transition to state DOWN, "
				       "current state is %d\n",
				       __func__,
				       atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		if (conn->c_trans->t_type != RDS_TRANS_TCP ||
		    conn->c_path[0].cp_outgoing == 1)
			rds_queue_reconnect(&conn->c_path[0]);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

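/*
 * Fill an RDS_INFO_SEND_MESSAGES or RDS_INFO_RETRANS_MESSAGES request:
 * walk every path of every hashed connection under cp_lock and copy one
 * rds_info_message for each message on the send (or retransmit) queue,
 * up to the number of entries the caller's buffer can hold.
 */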
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts..
				 */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						rds_inc_info_copy(&rm->m_inc,
								  iter,
								  conn->c_laddr,
								  conn->c_faddr,
								  0);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
				if (!conn->c_trans->t_mp_capable)
					break;
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

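/*
 * Per-path variant of rds_for_each_conn_info(): @visitor is applied to
 * each rds_conn_path of every hashed connection (only c_path[0] for
 * transports that are not multipath capable), and one item per
 * connection is copied out to the info iterator.
 */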
void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
			     struct rds_info_iterator *iter,
			     struct rds_info_lengths *lens,
			     int (*visitor)(struct rds_conn_path *, void *),
			     size_t item_len)
{
	u64 buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;
	int j;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];

				/* XXX no cp_lock usage.. */
				if (!visitor(cp, buffer))
					continue;
				if (!conn->c_trans->t_mp_capable)
					break;
			}

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}

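/*
 * Fill a struct rds_info_connection from one rds_conn_path: sequence
 * numbers, addresses, transport name, and the SENDING/CONNECTING/
 * CONNECTED flag bits derived from the path state.
 */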
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = cp->cp_conn->c_laddr;
	cinfo->faddr = cp->cp_conn->c_faddr;
	strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

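/*
 * Module init/exit for the connection layer: create (destroy) the
 * connection slab cache and register (deregister) the rds-info handlers
 * used by the connection and message dumps above.
 */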
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
	rds_conn_path_drop(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}