/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

int rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */
	int keepalive = 1;
	int ret = 0;

	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&keepalive, sizeof(keepalive));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
				(char *)&keepcnt, sizeof(keepcnt));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
				(char *)&keepidle, sizeof(keepidle));
	if (ret < 0)
		goto bail;

	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
				(char *)&keepidle, sizeof(keepidle));
bail:
	return ret;
}
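
/*
 * Illustrative sketch (editorial addition, not part of this file in the
 * kernel tree): the same keepalive tuning as rds_tcp_keepalive() above,
 * expressed as a plain userspace setsockopt() sequence on an already
 * connected TCP socket.  The helper name tune_keepalive() and the fd are
 * hypothetical; the option values and the reuse of keepidle for
 * TCP_KEEPINTVL mirror the code above.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int tune_keepalive(int fd)
{
	int keepalive = 1;	/* enable keepalive probes */
	int keepidle = 5;	/* first probe 5 secs after last data */
	int keepcnt = 5;	/* give up after 5 unacked probes */

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
		       &keepalive, sizeof(keepalive)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,
		       &keepcnt, sizeof(keepcnt)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
		       &keepidle, sizeof(keepidle)) < 0)
		return -1;
	/* probe interval re-uses keepidle, as in rds_tcp_keepalive() */
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
			  &keepidle, sizeof(keepidle));
}
#endif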

/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
 * client's ipaddr < server's ipaddr. Otherwise, close the accepted
 * socket and force a reconnect from smaller -> larger ip addr. The reason
 * we special case cp_index 0 is to allow the rds probe ping itself to
 * get through efficiently.
 * Since reconnects are only initiated from the node with the numerically
 * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
 * by moving them to CONNECTING in this function.
 */
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
	int i;
	bool peer_is_smaller = IS_CANONICAL(conn->c_faddr, conn->c_laddr);
	int npaths = max_t(int, 1, conn->c_npaths);

	/* for mprds, all paths MUST be initiated by the peer
	 * with the smaller address.
	 */
	if (!peer_is_smaller) {
		/* Make sure we initiate at least one path if this
		 * has not already been done; rds_start_mprds() will
		 * take care of additional paths, if necessary.
		 */
		if (npaths == 1)
			rds_conn_path_connect_if_down(&conn->c_path[0]);
		return NULL;
	}

	for (i = 0; i < npaths; i++) {
		struct rds_conn_path *cp = &conn->c_path[i];

		if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
					     RDS_CONN_CONNECTING) ||
		    rds_conn_path_transition(cp, RDS_CONN_ERROR,
					     RDS_CONN_CONNECTING)) {
			return cp->cp_transport_data;
		}
	}
	return NULL;
}

void rds_tcp_set_linger(struct socket *sock)
{
	struct linger no_linger = {
		.l_onoff = 1,
		.l_linger = 0,
	};

	kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
			  (char *)&no_linger, sizeof(no_linger));
}
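
/*
 * Illustrative sketch (editorial addition, not part of this file in the
 * kernel tree): the userspace equivalent of rds_tcp_set_linger() above.
 * With l_onoff=1 and l_linger=0, close() sends an RST rather than a FIN,
 * so the socket never enters TIME_WAIT; that is the side effect
 * rds_tcp_accept_one() below relies on when it rejects an accepted socket.
 * The helper name and the fd are hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger no_linger = {
		.l_onoff = 1,	/* linger on close ... */
		.l_linger = 0,	/* ... for zero seconds: abortive close */
	};

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &no_linger, sizeof(no_linger));
	close(fd);		/* RST is sent, TIME_WAIT is skipped */
}
#endif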

int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp = NULL;
	int conn_state;
	struct rds_conn_path *cp;

	if (!sock) /* module unload or netns delete in progress */
		return -ENETUNREACH;

	ret = sock_create_lite(sock->sk->sk_family,
			       sock->sk->sk_type, sock->sk->sk_protocol,
			       &new_sock);
	if (ret)
		goto out;

	new_sock->type = sock->type;
	new_sock->ops = sock->ops;
	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
	if (ret < 0)
		goto out;

	ret = rds_tcp_keepalive(new_sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

	rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
		 &inet->inet_saddr, ntohs(inet->inet_sport),
		 &inet->inet_daddr, ntohs(inet->inet_dport));

	conn = rds_conn_create(sock_net(sock->sk),
			       inet->inet_saddr, inet->inet_daddr,
			       &rds_tcp_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup.
	 */
	rs_tcp = rds_tcp_accept_one_path(conn);
	if (!rs_tcp)
		goto rst_nsk;
	mutex_lock(&rs_tcp->t_conn_path_lock);
	cp = rs_tcp->t_cpath;
	conn_state = rds_conn_path_state(cp);
	WARN_ON(conn_state == RDS_CONN_UP);
	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
		goto rst_nsk;
	if (rs_tcp->t_sock) {
		/* Duelling SYN has been handled in rds_tcp_accept_one() */
		rds_tcp_reset_callbacks(new_sock, cp);
		/* rds_connect_path_complete() marks RDS_CONN_UP */
		rds_connect_path_complete(cp, RDS_CONN_RESETTING);
	} else {
		rds_tcp_set_callbacks(new_sock, cp);
		rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
	}
	new_sock = NULL;
	ret = 0;
	if (conn->c_npaths == 0)
		rds_send_ping(cp->cp_conn, cp->cp_index);
	goto out;
rst_nsk:
	/* Reset the newly returned accept sock and bail.
	 * It is safe to set linger on new_sock because the RDS connection
	 * has not been brought up on new_sock, so no RDS-level data could
	 * be pending on it. By setting linger, we achieve the side-effect
	 * of avoiding TIME_WAIT state on new_sock.
	 */
	rds_tcp_set_linger(new_sock);
	kernel_sock_shutdown(new_sock, SHUT_RDWR);
	ret = 0;
out:
	if (rs_tcp)
		mutex_unlock(&rs_tcp->t_conn_path_lock);
	if (new_sock)
		sock_release(new_sock);
	return ret;
}

void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready. We only want to queue listen work for our listening
	 * socket.
	 *
	 * (*ready)() may be null if we are racing with netns delete, and
	 * the listen socket is being torn down.
	 */
	if (sk->sk_state == TCP_LISTEN)
		rds_tcp_accept_work(sk);
	else
		ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (ready)
		ready(sk);
}

struct socket *rds_tcp_listen_init(struct net *net)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		goto out;

	sock->sk->sk_reuse = SK_CAN_REUSE;
	rds_tcp_nonagle(sock);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	sin.sin_family = PF_INET;
	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
	sin.sin_port = (__force u16)htons(RDS_TCP_PORT);

	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (ret < 0)
		goto out;

	ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		goto out;

	return sock;
out:
	if (sock)
		sock_release(sock);
	return NULL;
}

void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
	struct sock *sk;

	if (!sock)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	flush_work(acceptor);
	sock_release(sock);
}
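
/*
 * Illustrative sketch (editorial addition, not part of this file in the
 * kernel tree): the MPRDS rule enforced by rds_tcp_accept_one_path() above,
 * namely that extra paths must be initiated by the peer whose IPv4 address
 * is numerically smaller.  The helper name is hypothetical, and it assumes
 * the addresses are compared as host-byte-order integers; it stands in for
 * the IS_CANONICAL() check used by the real code.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

/* true if the peer (faddr) is the side expected to initiate extra paths */
static bool peer_initiates_paths(uint32_t faddr_be, uint32_t laddr_be)
{
	return ntohl(faddr_be) < ntohl(laddr_be);
}
#endif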