/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
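/*
 * rds_conn_lock serializes updates to rds_conn_hash below; readers walk
 * the hash under rcu_read_lock() or while holding the lock (see
 * rds_conn_lookup()).
 */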
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	/* Pass NULL, don't need struct net for hash */
	unsigned long hash = inet_ehashfn(NULL,
					  be32_to_cpu(laddr), 0,
					  be32_to_cpu(faddr), 0);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	if (rds_trans_get_preferred(faddr)) {
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
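
/*
 * Illustrative sketch only (not part of this file): a transport's
 * incoming-connection handler might obtain its conn roughly like this.
 * "my_transport" and the address variables are placeholders, and error
 * handling beyond the IS_ERR() check is elided:
 *
 *	struct rds_connection *conn;
 *
 *	conn = rds_conn_create(local_addr, peer_addr, &my_transport,
 *			       GFP_KERNEL);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */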

struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of
			 * tearing down the connection, and someone unloads
			 * the rds module. Quite reproducible with loopback
			 * connections. Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer.
	 */
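	/*
	 * Note: rds_conn_destroy() removes the conn from the hash
	 * (hlist_del_init_rcu()) before calling in here, so the
	 * hlist_unhashed() check below also keeps a conn that is being
	 * destroyed from queueing a reconnect.
	 */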
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);

	synchronize_rcu();

	rds_conn_shutdown(conn);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	rds_conn_count--;
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct hlist_node *pos;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}
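
/*
 * For context, a rough, illustrative sketch of how user space (e.g. the
 * rds-info tool) reads the RDS_INFO_CONNECTIONS records built above;
 * buffer resizing and error handling are elided:
 *
 *	struct rds_info_connection info[128];
 *	socklen_t optlen = sizeof(info);
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *
 *	getsockopt(fd, SOL_RDS, RDS_INFO_CONNECTIONS, info, &optlen);
 */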

int __init rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}
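
/*
 * Callers normally use the rds_conn_error() wrapper from rds.h rather than
 * calling __rds_conn_error() directly; an illustrative call (the message
 * text and "err" are made up for this example):
 *
 *	rds_conn_error(conn, "connection to %pI4 lost, err %d\n",
 *		       &conn->c_faddr, err);
 */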