/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/addrconf.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
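/* All connections live in one global hash table, bucketed by the
 * (local, peer) address pair.  Lookups walk a bucket under
 * rcu_read_lock(); insertions and removals are serialized by
 * rds_conn_lock.
 */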
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
                                          const struct in6_addr *faddr)
{
        static u32 rds6_hash_secret __read_mostly;
        static u32 rds_hash_secret __read_mostly;

        u32 lhash, fhash, hash;

        net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
        net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));

        lhash = (__force u32)laddr->s6_addr32[3];
        fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
        hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);

        return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {               \
        if (test)                                               \
                var |= RDS_INFO_CONNECTION_FLAG_##suffix;       \
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
                                              struct hlist_head *head,
                                              const struct in6_addr *laddr,
                                              const struct in6_addr *faddr,
                                              struct rds_transport *trans,
                                              int dev_if)
{
        struct rds_connection *conn, *ret = NULL;

        hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
                    ipv6_addr_equal(&conn->c_laddr, laddr) &&
                    conn->c_trans == trans &&
                    net == rds_conn_net(conn) &&
                    conn->c_dev_if == dev_if) {
                        ret = conn;
                        break;
                }
        }
        rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
                 laddr, faddr);
        return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;

        rdsdebug("connection %pI6c to %pI6c reset\n",
                 &conn->c_laddr, &conn->c_faddr);

        rds_stats_inc(s_conn_reset);
        rds_send_path_reset(cp);
        cp->cp_flags = 0;

        /* Do not clear next_rx_seq here, else we cannot distinguish
         * retransmitted packets from new packets, and will hand all
         * of them to the application. That is not consistent with the
         * reliability guarantees of RDS. */
}

static void __rds_conn_path_init(struct rds_connection *conn,
                                 struct rds_conn_path *cp, bool is_outgoing)
{
        spin_lock_init(&cp->cp_lock);
        cp->cp_next_tx_seq = 1;
        init_waitqueue_head(&cp->cp_waitq);
        INIT_LIST_HEAD(&cp->cp_send_queue);
        INIT_LIST_HEAD(&cp->cp_retrans);

        cp->cp_conn = conn;
        atomic_set(&cp->cp_state, RDS_CONN_DOWN);
        cp->cp_send_gen = 0;
        cp->cp_reconnect_jiffies = 0;
        INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
        INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
        INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
        INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
        mutex_init(&cp->cp_cm_lock);
        cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
                                                const struct in6_addr *laddr,
                                                const struct in6_addr *faddr,
                                                struct rds_transport *trans,
                                                gfp_t gfp,
                                                int is_outgoing,
                                                int dev_if)
{
        struct rds_connection *conn, *parent = NULL;
        struct hlist_head *head = rds_conn_bucket(laddr, faddr);
        struct rds_transport *loop_trans;
        unsigned long flags;
        int ret, i;
        int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

        rcu_read_lock();
        conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
        if (conn &&
            conn->c_loopback &&
            conn->c_trans != &rds_loop_transport &&
            ipv6_addr_equal(laddr, faddr) &&
            !is_outgoing) {
                /* This is a looped back IB connection, and we're
                 * called by the code handling the incoming connect.
                 * We need a second connection object into which we
                 * can stick the other QP. */
                parent = conn;
                conn = parent->c_passive;
        }
        rcu_read_unlock();
        if (conn)
                goto out;

        conn = kmem_cache_zalloc(rds_conn_slab, gfp);
        if (!conn) {
                conn = ERR_PTR(-ENOMEM);
                goto out;
        }
        conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
        if (!conn->c_path) {
                kmem_cache_free(rds_conn_slab, conn);
                conn = ERR_PTR(-ENOMEM);
                goto out;
        }

        INIT_HLIST_NODE(&conn->c_hash_node);
        conn->c_laddr = *laddr;
        conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
        conn->c_faddr = *faddr;
        conn->c_dev_if = dev_if;
        /* If the local address is link local, set c_bound_if to be the
         * index used for this connection.  Otherwise, set it to 0 as
         * the socket is not bound to an interface.  c_bound_if is used
         * to look up a socket when a packet is received
         */
        if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
                conn->c_bound_if = dev_if;
        else
                conn->c_bound_if = 0;

        rds_conn_net_set(conn, net);

        ret = rds_cong_get_maps(conn);
        if (ret) {
                kfree(conn->c_path);
                kmem_cache_free(rds_conn_slab, conn);
                conn = ERR_PTR(ret);
                goto out;
        }

        /*
         * This is where a connection becomes loopback.  If *any* RDS sockets
         * can bind to the destination address then we'd rather the messages
         * flow through loopback rather than either transport.
         */
        loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
        if (loop_trans) {
                rds_trans_put(loop_trans);
                conn->c_loopback = 1;
                if (is_outgoing && trans->t_prefer_loopback) {
                        /* "outgoing" connection - and the transport
                         * says it wants the connection handled by the
                         * loopback transport. This is what TCP does.
                         */
                        trans = &rds_loop_transport;
                }
        }

        conn->c_trans = trans;

        init_waitqueue_head(&conn->c_hs_waitq);
        for (i = 0; i < npaths; i++) {
                __rds_conn_path_init(conn, &conn->c_path[i],
                                     is_outgoing);
                conn->c_path[i].cp_index = i;
        }
        rcu_read_lock();
        if (rds_destroy_pending(conn))
                ret = -ENETDOWN;
        else
                ret = trans->conn_alloc(conn, GFP_ATOMIC);
        if (ret) {
                rcu_read_unlock();
                kfree(conn->c_path);
                kmem_cache_free(rds_conn_slab, conn);
                conn = ERR_PTR(ret);
                goto out;
        }

        rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
                 conn, laddr, faddr,
                 strnlen(trans->t_name, sizeof(trans->t_name)) ?
                 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");

        /*
         * Since we ran without holding the conn lock, someone could
         * have created the same conn (either normal or passive) in the
         * interim.  We check while holding the lock.  If we won, we complete
         * init and return our conn.  If we lost, we rollback and return the
         * other one.
         */
        spin_lock_irqsave(&rds_conn_lock, flags);
        if (parent) {
                /* Creating passive conn */
                if (parent->c_passive) {
                        trans->conn_free(conn->c_path[0].cp_transport_data);
                        kfree(conn->c_path);
                        kmem_cache_free(rds_conn_slab, conn);
                        conn = parent->c_passive;
                } else {
                        parent->c_passive = conn;
                        rds_cong_add_conn(conn);
                        rds_conn_count++;
                }
        } else {
                /* Creating normal conn */
                struct rds_connection *found;

                found = rds_conn_lookup(net, head, laddr, faddr, trans,
                                        dev_if);
                if (found) {
                        struct rds_conn_path *cp;
                        int i;

                        for (i = 0; i < npaths; i++) {
                                cp = &conn->c_path[i];
                                /* The ->conn_alloc invocation may have
                                 * allocated resource for all paths, so all
                                 * of them may have to be freed here.
                                 */
                                if (cp->cp_transport_data)
                                        trans->conn_free(cp->cp_transport_data);
                        }
                        kfree(conn->c_path);
                        kmem_cache_free(rds_conn_slab, conn);
                        conn = found;
                } else {
                        conn->c_my_gen_num = rds_gen_num;
                        conn->c_peer_gen_num = 0;
                        hlist_add_head_rcu(&conn->c_hash_node, head);
                        rds_cong_add_conn(conn);
                        rds_conn_count++;
                }
        }
        spin_unlock_irqrestore(&rds_conn_lock, flags);
        rcu_read_unlock();

out:
        return conn;
}

struct rds_connection *rds_conn_create(struct net *net,
                                       const struct in6_addr *laddr,
                                       const struct in6_addr *faddr,
                                       struct rds_transport *trans, gfp_t gfp,
                                       int dev_if)
{
        return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
                                                const struct in6_addr *laddr,
                                                const struct in6_addr *faddr,
                                                struct rds_transport *trans,
                                                gfp_t gfp, int dev_if)
{
        return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

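/* Bring a single connection path down.  The path is moved to
 * DISCONNECTING, the transport tears down its per-path state, the
 * path's send and receive state is reset to DOWN, and a reconnect is
 * queued if the connection is still in the hash (i.e. not being
 * destroyed).
 */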
void rds_conn_shutdown(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;

        /* shut it down unless it's down already */
        if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
                /*
                 * Quiesce the connection mgmt handlers before we start tearing
                 * things down.  We don't hold the mutex for the entire
                 * duration of the shutdown operation, else we may be
                 * deadlocking with the CM handler.  Instead, the CM event
                 * handler is supposed to check for state DISCONNECTING
                 */
                mutex_lock(&cp->cp_cm_lock);
                if (!rds_conn_path_transition(cp, RDS_CONN_UP,
                                              RDS_CONN_DISCONNECTING) &&
                    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                              RDS_CONN_DISCONNECTING)) {
                        rds_conn_path_error(cp,
                                            "shutdown called in state %d\n",
                                            atomic_read(&cp->cp_state));
                        mutex_unlock(&cp->cp_cm_lock);
                        return;
                }
                mutex_unlock(&cp->cp_cm_lock);

                wait_event(cp->cp_waitq,
                           !test_bit(RDS_IN_XMIT, &cp->cp_flags));
                wait_event(cp->cp_waitq,
                           !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

                conn->c_trans->conn_path_shutdown(cp);
                rds_conn_path_reset(cp);

                if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
                                              RDS_CONN_DOWN) &&
                    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                              RDS_CONN_DOWN)) {
                        /* This can happen - e.g. when we're in the middle of
                         * tearing down the connection, and someone unloads
                         * the rds module.
                         * Quite reproducible with loopback connections.
                         * Mostly harmless.
                         *
                         * Note that this also happens with rds-tcp because
                         * we could have triggered rds_conn_path_drop in irq
                         * mode from rds_tcp_state change on the receipt of
                         * a FIN, thus we need to recheck for RDS_CONN_ERROR
                         * here.
                         */
                        rds_conn_path_error(cp, "%s: failed to transition "
                                            "to state DOWN, current state "
                                            "is %d\n", __func__,
                                            atomic_read(&cp->cp_state));
                        return;
                }
        }

        /* Then reconnect if it's still live.
         * The passive side of an IB loopback connection is never added
         * to the conn hash, so we never trigger a reconnect on this
         * conn - the reconnect is always triggered by the active peer.
         */
        cancel_delayed_work_sync(&cp->cp_conn_w);
        rcu_read_lock();
        if (!hlist_unhashed(&conn->c_hash_node)) {
                rcu_read_unlock();
                rds_queue_reconnect(cp);
        } else {
                rcu_read_unlock();
        }
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
        struct rds_message *rm, *rtmp;

        if (!cp->cp_transport_data)
                return;

        /* make sure lingering queued work won't try to ref the conn */
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);

        rds_conn_path_drop(cp, true);
        flush_work(&cp->cp_down_w);

        /* tear down queued messages */
        list_for_each_entry_safe(rm, rtmp,
                                 &cp->cp_send_queue,
                                 m_conn_item) {
                list_del_init(&rm->m_conn_item);
                BUG_ON(!list_empty(&rm->m_sock_item));
                rds_message_put(rm);
        }
        if (cp->cp_xmit_rm)
                rds_message_put(cp->cp_xmit_rm);

        WARN_ON(delayed_work_pending(&cp->cp_send_w));
        WARN_ON(delayed_work_pending(&cp->cp_recv_w));
        WARN_ON(delayed_work_pending(&cp->cp_conn_w));
        WARN_ON(work_pending(&cp->cp_down_w));

        cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
        unsigned long flags;
        int i;
        struct rds_conn_path *cp;
        int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

        rdsdebug("freeing conn %p for %pI6c -> %pI6c\n", conn,
                 &conn->c_laddr, &conn->c_faddr);

        /* Ensure conn will not be scheduled for reconnect */
        spin_lock_irq(&rds_conn_lock);
        hlist_del_init_rcu(&conn->c_hash_node);
        spin_unlock_irq(&rds_conn_lock);
        synchronize_rcu();

        /* shut the connection down */
        for (i = 0; i < npaths; i++) {
                cp = &conn->c_path[i];
                rds_conn_path_destroy(cp);
                BUG_ON(!list_empty(&cp->cp_retrans));
        }

        /*
         * The congestion maps aren't freed up here.  They're
         * freed by rds_cong_exit() after all the connections
         * have been freed.
         */
        rds_cong_remove_conn(conn);

        kfree(conn->c_path);
        kmem_cache_free(rds_conn_slab, conn);

        spin_lock_irqsave(&rds_conn_lock, flags);
        rds_conn_count--;
        spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void __rds_inc_msg_cp(struct rds_incoming *inc,
                             struct rds_info_iterator *iter,
                             void *saddr, void *daddr, int flip)
{
        rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
                          *(__be32 *)daddr, flip);
}

static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
                                      struct rds_info_iterator *iter,
                                      struct rds_info_lengths *lens,
                                      int want_send)
{
        struct hlist_head *head;
        struct list_head *list;
        struct rds_connection *conn;
        struct rds_message *rm;
        unsigned int total = 0;
        unsigned long flags;
        size_t i;
        int j;

        len /= sizeof(struct rds_info_message);

        rcu_read_lock();

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                        struct rds_conn_path *cp;
                        int npaths;

                        npaths = (conn->c_trans->t_mp_capable ?
                                  RDS_MPATH_WORKERS : 1);

                        for (j = 0; j < npaths; j++) {
                                cp = &conn->c_path[j];
                                if (want_send)
                                        list = &cp->cp_send_queue;
                                else
                                        list = &cp->cp_retrans;

                                spin_lock_irqsave(&cp->cp_lock, flags);

                                /* XXX too lazy to maintain counts.. */
                                list_for_each_entry(rm, list, m_conn_item) {
                                        total++;
                                        if (total <= len)
                                                __rds_inc_msg_cp(&rm->m_inc,
                                                                 iter,
                                                                 &conn->c_laddr,
                                                                 &conn->c_faddr,
                                                                 0);
                                }

                                spin_unlock_irqrestore(&cp->cp_lock, flags);
                        }
                }
        }
        rcu_read_unlock();

        lens->nr = total;
        lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info(struct socket *sock, unsigned int len,
                                  struct rds_info_iterator *iter,
                                  struct rds_info_lengths *lens,
                                  int want_send)
{
        rds_conn_message_info_cmn(sock, len, iter, lens, want_send);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
                                       struct rds_info_iterator *iter,
                                       struct rds_info_lengths *lens)
{
        rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
                                          unsigned int len,
                                          struct rds_info_iterator *iter,
                                          struct rds_info_lengths *lens)
{
        rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens,
                            int (*visitor)(struct rds_connection *, void *),
                            u64 *buffer,
                            size_t item_len)
{
        struct hlist_head *head;
        struct rds_connection *conn;
        size_t i;

        rcu_read_lock();

        lens->nr = 0;
        lens->each = item_len;

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {

                        /* XXX no c_lock usage.. */
                        if (!visitor(conn, buffer))
                                continue;

                        /* We copy as much as we can fit in the buffer,
                         * but we count all items so that the caller
                         * can resize the buffer. */
                        if (len >= item_len) {
                                rds_info_copy(iter, buffer, item_len);
                                len -= item_len;
                        }
                        lens->nr++;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
                                    struct rds_info_iterator *iter,
                                    struct rds_info_lengths *lens,
                                    int (*visitor)(struct rds_conn_path *, void *),
                                    u64 *buffer,
                                    size_t item_len)
{
        struct hlist_head *head;
        struct rds_connection *conn;
        size_t i;

        rcu_read_lock();

        lens->nr = 0;
        lens->each = item_len;

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                        struct rds_conn_path *cp;

                        /* XXX We only copy the information from the first
                         * path for now.  The problem is that if there is
                         * more than one underlying path, we cannot report
                         * information about all of them using the existing
                         * API.  For example, there is only one next_tx_seq;
                         * which path's next_tx_seq should we report?  It is
                         * a bug in the design of MPRDS.
                         */
                        cp = conn->c_path;

                        /* XXX no cp_lock usage.. */
                        if (!visitor(cp, buffer))
                                continue;

                        /* We copy as much as we can fit in the buffer,
                         * but we count all items so that the caller
                         * can resize the buffer.
                         */
                        if (len >= item_len) {
                                rds_info_copy(iter, buffer, item_len);
                                len -= item_len;
                        }
                        lens->nr++;
                }
        }
        rcu_read_unlock();
}

static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
        struct rds_info_connection *cinfo = buffer;
        struct rds_connection *conn = cp->cp_conn;

        if (conn->c_isv6)
                return 0;

        cinfo->next_tx_seq = cp->cp_next_tx_seq;
        cinfo->next_rx_seq = cp->cp_next_rx_seq;
        cinfo->laddr = conn->c_laddr.s6_addr32[3];
        cinfo->faddr = conn->c_faddr.s6_addr32[3];
        strncpy(cinfo->transport, conn->c_trans->t_name,
                sizeof(cinfo->transport));
        cinfo->flags = 0;

        rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
                          SENDING);
        /* XXX Future: return the state rather than these funky bits */
        rds_conn_info_set(cinfo->flags,
                          atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
                          CONNECTING);
        rds_conn_info_set(cinfo->flags,
                          atomic_read(&cp->cp_state) == RDS_CONN_UP,
                          CONNECTED);
        return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens)
{
        u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];

        rds_walk_conn_path_info(sock, len, iter, lens,
                                rds_conn_info_visitor,
                                buffer,
                                sizeof(struct rds_info_connection));
}

int rds_conn_init(void)
{
        int ret;

        ret = rds_loop_net_init(); /* register pernet callback */
        if (ret)
                return ret;

        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
        if (!rds_conn_slab) {
                rds_loop_net_exit();
                return -ENOMEM;
        }

        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
                               rds_conn_message_info_send);
        rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
                               rds_conn_message_info_retrans);

        return 0;
}

void rds_conn_exit(void)
{
        rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();

        WARN_ON(!hlist_empty(rds_conn_hash));

        kmem_cache_destroy(rds_conn_slab);

        rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
                                 rds_conn_message_info_send);
        rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
                                 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
        atomic_set(&cp->cp_state, RDS_CONN_ERROR);

        rcu_read_lock();
        if (!destroy && rds_destroy_pending(cp->cp_conn)) {
                rcu_read_unlock();
                return;
        }
        queue_work(rds_wq, &cp->cp_down_w);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect.  We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
        rcu_read_lock();
        if (rds_destroy_pending(cp->cp_conn)) {
                rcu_read_unlock();
                return;
        }
        if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
                queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);

        rds_conn_path_drop(cp, false);
}