/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
	mm_segment_t oldfs = get_fs();
	int val = 1;

	set_fs(KERNEL_DS);
	sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
			      sizeof(val));
	set_fs(oldfs);
}

void rds_tcp_tune(struct socket *sock)
{
	struct sock *sk = sock->sk;

	rds_tcp_nonagle(sock);

	/*
	 * We're trying to saturate gigabit with the default,
	 * see svc_sock_setbufsize().
	 */
	lock_sock(sk);
	sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
	sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
	sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sk);
}

u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
	rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * This is the only path that sets tc->t_sock.  Send and receive trust that
 * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
	rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	tc->conn = conn;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = conn;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;
	struct sockaddr_in sin;
	int sinlen;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {

		sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
		tsinfo.local_addr = sin.sin_addr.s_addr;
		tsinfo.local_port = sin.sin_port;
		sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
		tsinfo.peer_addr = sin.sin_addr.s_addr;
		tsinfo.peer_port = sin.sin_port;

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

static int rds_tcp_laddr_check(__be32 addr)
{
	if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
		return 0;
	return -EADDRNOTAVAIL;
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;

	tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
	if (tc == NULL)
		return -ENOMEM;

	tc->t_sock = NULL;
	tc->t_tinc = NULL;
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;

	conn->c_transport_data = tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	rdsdebug("alloced tc %p\n", conn->c_transport_data);
	return 0;
}

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	rdsdebug("freeing tc %p\n", tc);
	kmem_cache_free(rds_tcp_conn_slab, tc);
}

static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_splice(&rds_tcp_conn_list, &tmp_list);
	INIT_LIST_HEAD(&rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		if (tc->conn->c_passive)
			rds_conn_destroy(tc->conn->c_passive);
		rds_conn_destroy(tc->conn);
	}
}

void rds_tcp_exit(void)
{
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
	rds_tcp_listen_stop();
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

struct rds_transport rds_tcp_transport = {
	.laddr_check		= rds_tcp_laddr_check,
	.xmit_prepare		= rds_tcp_xmit_prepare,
	.xmit_complete		= rds_tcp_xmit_complete,
	.xmit_cong_map		= rds_tcp_xmit_cong_map,
	.xmit			= rds_tcp_xmit,
	.recv			= rds_tcp_recv,
	.conn_alloc		= rds_tcp_conn_alloc,
	.conn_free		= rds_tcp_conn_free,
	.conn_connect		= rds_tcp_conn_connect,
	.conn_shutdown		= rds_tcp_conn_shutdown,
	.inc_copy_to_user	= rds_tcp_inc_copy_to_user,
	.inc_purge		= rds_tcp_inc_purge,
	.inc_free		= rds_tcp_inc_free,
	.stats_info_copy	= rds_tcp_stats_info_copy,
	.exit			= rds_tcp_exit,
	.t_owner		= THIS_MODULE,
	.t_name			= "tcp",
	.t_type			= RDS_TRANS_TCP,
	.t_prefer_loopback	= 1,
};

int __init rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
					      sizeof(struct rds_tcp_connection),
					      0, 0, NULL);
	if (rds_tcp_conn_slab == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_slab;

	ret = rds_trans_register(&rds_tcp_transport);
	if (ret)
		goto out_recv;

	ret = rds_tcp_listen_init();
	if (ret)
		goto out_register;

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

	goto out;

out_register:
	rds_trans_unregister(&rds_tcp_transport);
out_recv:
	rds_tcp_recv_exit();
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");