/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

/* Sizing of the global connection hash table: 4096 buckets keyed on the
 * (local, peer) address pair.  The mask assumes ENTRIES is a power of two. */
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day..
*/ 4600e0f34cSAndy Grover static DEFINE_SPINLOCK(rds_conn_lock); 4700e0f34cSAndy Grover static unsigned long rds_conn_count; 4800e0f34cSAndy Grover static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES]; 4900e0f34cSAndy Grover static struct kmem_cache *rds_conn_slab; 5000e0f34cSAndy Grover 5100e0f34cSAndy Grover static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr) 5200e0f34cSAndy Grover { 5300e0f34cSAndy Grover /* Pass NULL, don't need struct net for hash */ 5400e0f34cSAndy Grover unsigned long hash = inet_ehashfn(NULL, 5500e0f34cSAndy Grover be32_to_cpu(laddr), 0, 5600e0f34cSAndy Grover be32_to_cpu(faddr), 0); 5700e0f34cSAndy Grover return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK]; 5800e0f34cSAndy Grover } 5900e0f34cSAndy Grover 6000e0f34cSAndy Grover #define rds_conn_info_set(var, test, suffix) do { \ 6100e0f34cSAndy Grover if (test) \ 6200e0f34cSAndy Grover var |= RDS_INFO_CONNECTION_FLAG_##suffix; \ 6300e0f34cSAndy Grover } while (0) 6400e0f34cSAndy Grover 65bcf50ef2SChris Mason /* rcu read lock must be held or the connection spinlock */ 6600e0f34cSAndy Grover static struct rds_connection *rds_conn_lookup(struct hlist_head *head, 6700e0f34cSAndy Grover __be32 laddr, __be32 faddr, 6800e0f34cSAndy Grover struct rds_transport *trans) 6900e0f34cSAndy Grover { 7000e0f34cSAndy Grover struct rds_connection *conn, *ret = NULL; 7100e0f34cSAndy Grover struct hlist_node *pos; 7200e0f34cSAndy Grover 73bcf50ef2SChris Mason hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { 7400e0f34cSAndy Grover if (conn->c_faddr == faddr && conn->c_laddr == laddr && 7500e0f34cSAndy Grover conn->c_trans == trans) { 7600e0f34cSAndy Grover ret = conn; 7700e0f34cSAndy Grover break; 7800e0f34cSAndy Grover } 7900e0f34cSAndy Grover } 8000e0f34cSAndy Grover rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret, 8100e0f34cSAndy Grover &laddr, &faddr); 8200e0f34cSAndy Grover return ret; 8300e0f34cSAndy Grover } 8400e0f34cSAndy Grover 8500e0f34cSAndy Grover /* 
8600e0f34cSAndy Grover * This is called by transports as they're bringing down a connection. 8700e0f34cSAndy Grover * It clears partial message state so that the transport can start sending 8800e0f34cSAndy Grover * and receiving over this connection again in the future. It is up to 8900e0f34cSAndy Grover * the transport to have serialized this call with its send and recv. 9000e0f34cSAndy Grover */ 9100e0f34cSAndy Grover void rds_conn_reset(struct rds_connection *conn) 9200e0f34cSAndy Grover { 9300e0f34cSAndy Grover rdsdebug("connection %pI4 to %pI4 reset\n", 9400e0f34cSAndy Grover &conn->c_laddr, &conn->c_faddr); 9500e0f34cSAndy Grover 9600e0f34cSAndy Grover rds_stats_inc(s_conn_reset); 9700e0f34cSAndy Grover rds_send_reset(conn); 9800e0f34cSAndy Grover conn->c_flags = 0; 9900e0f34cSAndy Grover 10000e0f34cSAndy Grover /* Do not clear next_rx_seq here, else we cannot distinguish 10100e0f34cSAndy Grover * retransmitted packets from new packets, and will hand all 10200e0f34cSAndy Grover * of them to the application. That is not consistent with the 10300e0f34cSAndy Grover * reliability guarantees of RDS. */ 10400e0f34cSAndy Grover } 10500e0f34cSAndy Grover 10600e0f34cSAndy Grover /* 10700e0f34cSAndy Grover * There is only every one 'conn' for a given pair of addresses in the 10800e0f34cSAndy Grover * system at a time. They contain messages to be retransmitted and so 10900e0f34cSAndy Grover * span the lifetime of the actual underlying transport connections. 11000e0f34cSAndy Grover * 11100e0f34cSAndy Grover * For now they are not garbage collected once they're created. They 11200e0f34cSAndy Grover * are torn down as the module is removed, if ever. 
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	unsigned long flags;
	int ret;

	/* First a lock-free lookup under RCU; a hit means someone already
	 * created this conn and we can return it without allocating. */
	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	spin_lock_init(&conn->c_send_lock);
	atomic_set(&conn->c_send_generation, 1);
	atomic_set(&conn->c_senders, 0);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	/* Attach the shared congestion maps; on failure roll back the
	 * slab allocation and propagate the error code. */
	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	if (rds_trans_get_preferred(faddr)) {
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	/* Transport-private state (e.g. the QP bookkeeping for IB). */
	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
	  conn, &laddr, &faddr,
	  trans->t_name ? trans->t_name : "[unknown]",
	  is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			/* Lost the race: free ours, return the winner. */
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			/* Note: passive conns are never hashed; they are
			 * only reachable through parent->c_passive. */
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			/* Lost the race: free ours, return the winner. */
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

/*
 * Find or create the conn for laddr -> faddr on @trans.
 * Returns the conn or an ERR_PTR() on allocation/transport failure.
 */
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

/*
 * As rds_conn_create(), but marks the conn "outgoing" so that a loopback
 * peer gets its own second conn object (and TCP can prefer the loopback
 * transport).
 */
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

/*
 * Tear the connection down to state DOWN, then queue a reconnect if the
 * conn is still hashed.  Runs from the shutdown worker.
 */
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		/* verify everybody's out of rds_send_xmit() */
		spin_lock_irq(&conn->c_send_lock);
		spin_unlock_irq(&conn->c_send_lock);

		/* Wait for in-flight senders to drain; the lock/unlock pair
		 * acts as a barrier against rds_send_xmit() each iteration. */
		while(atomic_read(&conn->c_senders)) {
			schedule_timeout(1);
			spin_lock_irq(&conn->c_send_lock);
			spin_unlock_irq(&conn->c_send_lock);
		}

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproduceable with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);

	/* Wait for concurrent RCU hash walkers to drop their references
	 * before we start tearing the conn down. */
	synchronize_rcu();

	rds_conn_shutdown(conn);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	/* NOTE(review): decremented without rds_conn_lock while the
	 * increment in __rds_conn_create() is under it — confirm callers
	 * serialize destroys, or take the lock here. */
	rds_conn_count--;
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

/*
 * Dump the send queue (want_send) or retransmit queue of every conn into
 * @iter for the rds-info interface; @lens reports the true totals so the
 * caller can resize its buffer.
 */
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	/* Convert byte budget to a message-entry budget. */
	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			/* c_lock protects the message lists against
			 * concurrent send/ack processing. */
			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

/*
 * Walk every hashed connection, let @visitor render each one into a
 * scratch buffer of @item_len bytes, and copy as many rendered items as
 * fit into @iter.  lens->nr always counts all items.
 */
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	/* NOTE(review): on-stack VLA sized by caller-supplied item_len —
	 * assumes callers only pass small info-struct sizes; verify. */
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct hlist_node *pos;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

/* Render one conn into a struct rds_info_connection.  Always returns 1
 * (i.e. every conn is reported). */
static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	/* NOTE(review): spin_is_locked() as a "someone is sending" probe
	 * is racy and degenerate on !SMP builds — informational only. */
	rds_conn_info_set(cinfo->flags,
			  spin_is_locked(&conn->c_send_lock), SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

/* Module init: create the conn slab and register the rds-info handlers.
 * Returns 0 or -ENOMEM. */
int __init rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

/* Module exit: mirror of rds_conn_init(); all conns must be gone by now. */
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	/* Mark ERROR and let the shutdown worker do the actual teardown. */
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	/* Log the printf-style message, then drop the conn so the
	 * shutdown worker resets it. */
	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}