/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

/* Map a (local, foreign) address pair to its bucket in the global
 * connection hash table.  The hash secret is lazily initialized on
 * first use so early boot doesn't have to pay for it.
 */
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

/* Set a flag bit in @var only when @test is true. */
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	/* A match requires address pair, transport AND network namespace
	 * to agree; returns NULL when no such connection exists. */
	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
	  &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only every one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;
	struct rds_transport *otrans = trans;

	/* Passive TCP connections are never hashed (see below), so a
	 * lookup would be pointless - go straight to allocation. */
	if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
		goto new_conn;
	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

new_conn:
	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;
	rds_conn_net_set(conn, net);

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_send_gen = 0;
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
	  conn, &laddr, &faddr,
	  trans->t_name ? trans->t_name : "[unknown]",
	  is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
			found = NULL;
		else
			found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
			    (otrans->t_type != RDS_TRANS_TCP)) {
				/* Only the active side should be added to
				 * reconnect list for TCP.
				 */
				hlist_add_head_rcu(&conn->c_hash_node, head);
			}
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

/* Find or create the connection for (laddr, faddr) on @trans in @net.
 * Returns an ERR_PTR() on allocation or transport failure.
 */
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

/* As rds_conn_create(), but marks this side as the active (connecting)
 * peer, which affects loopback preference and TCP hash insertion.
 */
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

/* Tear down the transport state of @conn and, if it is still hashed,
 * queue a reconnect attempt.  Runs from the shutdown work.
 */
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		/* wait for transmit and receive-refill paths to drain
		 * before asking the transport to shut down */
		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
		wait_event(conn->c_waitq,
			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproduceable with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

/* Walk every connection and copy out either its send queue or its
 * retransmit queue (selected by @want_send) for the rds-info interface.
 * lens->nr counts all messages even when they don't fit in @len.
 */
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

/* rds-info handler: dump every connection's send queue. */
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

/* rds-info handler: dump every connection's retransmit queue. */
static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

/* Run @visitor over every hashed connection, copying @item_len bytes of
 * the visitor-filled buffer per accepted conn into the info iterator.
 */
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

/* Fill one struct rds_info_connection for @conn; always accepts. */
static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

/* rds-info handler: dump summary info for every connection. */
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

/* Module init: create the connection slab and register info handlers.
 * Returns 0 or -ENOMEM.
 */
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

/* Module exit: undo rds_conn_init() after all conns are destroyed. */
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	/* log first, then kick off the shutdown work */
	rds_conn_drop(conn);
}