/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

void rds_tcp_keepalive(struct socket *sock)
{
        /* values below based on xs_udp_default_timeout */
        int keepidle = 5; /* send a probe 'keepidle' secs after last data */
        int keepcnt = 5; /* number of unack'ed probes before declaring dead */

        sock_set_keepalive(sock->sk);
        tcp_sock_set_keepcnt(sock->sk, keepcnt);
        tcp_sock_set_keepidle(sock->sk, keepidle);
        /* KEEPINTVL is the interval between successive probes. We follow
         * the model in xs_tcp_finish_connecting() and re-use keepidle.
         */
        tcp_sock_set_keepintvl(sock->sk, keepidle);
}
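
/* With the settings above, a peer that has gone silent is declared dead
 * after roughly keepidle + keepcnt * keepintvl = 5 + 5 * 5 = 30 seconds
 * of idle time: the first probe fires 5 seconds after the last data, and
 * up to 5 unacknowledged probes are sent 5 seconds apart before the
 * connection is dropped.
 */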

/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
 * client's ipaddr < server's ipaddr. Otherwise, close the accepted
 * socket and force a reconnect from smaller -> larger ip addr. The
 * reason we special case cp_index 0 is to let the rds probe ping a node
 * sends to itself get through efficiently.
 * Since reconnects are only initiated from the node with the numerically
 * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
 * by moving them to CONNECTING in this function.
 */
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
        int i;
        int npaths = max_t(int, 1, conn->c_npaths);

        /* for mprds, all paths MUST be initiated by the peer
         * with the smaller address.
         */
        if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
                /* Make sure we initiate at least one path if this
                 * has not already been done; rds_start_mprds() will
                 * take care of additional paths, if necessary.
                 */
                if (npaths == 1)
                        rds_conn_path_connect_if_down(&conn->c_path[0]);
                return NULL;
        }

        for (i = 0; i < npaths; i++) {
                struct rds_conn_path *cp = &conn->c_path[i];

                if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
                                             RDS_CONN_CONNECTING) ||
                    rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                             RDS_CONN_CONNECTING)) {
                        return cp->cp_transport_data;
                }
        }
        return NULL;
}
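
/* For example, with (hypothetical) peers 10.0.0.1 and 10.0.0.2, only
 * 10.0.0.1 may initiate paths.  On 10.0.0.2 an incoming SYN has
 * rds_addr_cmp(&c_faddr, &c_laddr) < 0, so the loop above hands out a
 * path for the accepted socket; on 10.0.0.1 the comparison is >= 0,
 * NULL is returned, and the caller resets the socket, forcing the
 * connection to be re-established from the smaller address.
 */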

int rds_tcp_accept_one(struct socket *sock)
{
        struct socket *new_sock = NULL;
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp = NULL;
        int conn_state;
        struct rds_conn_path *cp;
        struct in6_addr *my_addr, *peer_addr;
#if !IS_ENABLED(CONFIG_IPV6)
        struct in6_addr saddr, daddr;
#endif
        int dev_if = 0;

        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;

        ret = sock_create_lite(sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
                               &new_sock);
        if (ret)
                goto out;

        ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
        if (ret < 0)
                goto out;

        /* sock_create_lite() does not get a hold on the owner module so we
         * need to do it here.  Note that sock_release() uses sock->ops to
         * determine if it needs to decrement the reference count.  So set
         * sock->ops after calling accept() in case that fails.  And there's
         * no need to do try_module_get() as the listener should have a hold
         * already.
         */
        new_sock->ops = sock->ops;
        __module_get(new_sock->ops->owner);

        rds_tcp_keepalive(new_sock);
        rds_tcp_tune(new_sock);

        inet = inet_sk(new_sock->sk);

#if IS_ENABLED(CONFIG_IPV6)
        my_addr = &new_sock->sk->sk_v6_rcv_saddr;
        peer_addr = &new_sock->sk->sk_v6_daddr;
#else
        ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
        ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
        my_addr = &saddr;
        peer_addr = &daddr;
#endif
        rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
                 sock->sk->sk_family,
                 my_addr, ntohs(inet->inet_sport),
                 peer_addr, ntohs(inet->inet_dport));

#if IS_ENABLED(CONFIG_IPV6)
        /* sk_bound_dev_if is not set if the peer address is not a
         * link-local address.  In that case mcast_oif happens to be
         * set, so just use it.
         */
        if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
            !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
                struct ipv6_pinfo *inet6;

                inet6 = inet6_sk(new_sock->sk);
                dev_if = inet6->mcast_oif;
        } else {
                dev_if = new_sock->sk->sk_bound_dev_if;
        }
#endif

        conn = rds_conn_create(sock_net(sock->sk),
                               my_addr, peer_addr,
                               &rds_tcp_transport, 0, GFP_KERNEL, dev_if);

        if (IS_ERR(conn)) {
                ret = PTR_ERR(conn);
                goto out;
        }
        /* An incoming SYN request came in, and TCP just accepted it.
         *
         * If the client reboots, this conn will need to be cleaned up.
         * rds_tcp_state_change() will do that cleanup.
         */
        rs_tcp = rds_tcp_accept_one_path(conn);
        if (!rs_tcp)
                goto rst_nsk;
        mutex_lock(&rs_tcp->t_conn_path_lock);
        cp = rs_tcp->t_cpath;
        conn_state = rds_conn_path_state(cp);
        WARN_ON(conn_state == RDS_CONN_UP);
        if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
                goto rst_nsk;
        if (rs_tcp->t_sock) {
                /* Duelling SYN has been handled in rds_tcp_accept_one() */
                rds_tcp_reset_callbacks(new_sock, cp);
                /* rds_connect_path_complete() marks RDS_CONN_UP */
                rds_connect_path_complete(cp, RDS_CONN_RESETTING);
        } else {
                rds_tcp_set_callbacks(new_sock, cp);
                rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
        }
        new_sock = NULL;
        ret = 0;
        if (conn->c_npaths == 0)
                rds_send_ping(cp->cp_conn, cp->cp_index);
        goto out;
rst_nsk:
        /* reset the newly returned accept sock and bail.
         * It is safe to set linger on new_sock because the RDS connection
         * has not been brought up on new_sock, so no RDS-level data could
         * be pending on it.  By setting linger, we achieve the side-effect
         * of avoiding TIME_WAIT state on new_sock.
         */
        sock_no_linger(new_sock->sk);
        kernel_sock_shutdown(new_sock, SHUT_RDWR);
        ret = 0;
out:
        if (rs_tcp)
                mutex_unlock(&rs_tcp->t_conn_path_lock);
        if (new_sock)
                sock_release(new_sock);
        return ret;
}

void rds_tcp_listen_data_ready(struct sock *sk)
{
        void (*ready)(struct sock *sk);

        rdsdebug("listen data ready sk %p\n", sk);

        read_lock_bh(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
                goto out;
        }

        /*
         * ->sk_data_ready is also called for a newly established child socket
         * before it has been accepted and the acceptor has set up its
         * data_ready.  We only want to queue listen work for our listening
         * socket.
         *
         * (*ready)() may be null if we are racing with netns delete, and
         * the listen socket is being torn down.
         */
        if (sk->sk_state == TCP_LISTEN)
                rds_tcp_accept_work(sk);
        else
                ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

out:
        read_unlock_bh(&sk->sk_callback_lock);
        if (ready)
                ready(sk);
}
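
/* The teardown race checked above relies on the pattern set up in
 * rds_tcp_listen_init() below: the stock data_ready callback is stashed
 * in sk_user_data before sk_data_ready is overridden, roughly
 *
 *      sk->sk_user_data  = sk->sk_data_ready;          (save original)
 *      sk->sk_data_ready = rds_tcp_listen_data_ready;  (redirect)
 *
 * so a NULL sk_user_data means rds_tcp_listen_stop() has already
 * restored the original callback and the listener is going away.
 */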
"IPv6" : "IPv4", ret); 271 goto out; 272 } 273 274 sock->sk->sk_reuse = SK_CAN_REUSE; 275 tcp_sock_set_nodelay(sock->sk); 276 277 write_lock_bh(&sock->sk->sk_callback_lock); 278 sock->sk->sk_user_data = sock->sk->sk_data_ready; 279 sock->sk->sk_data_ready = rds_tcp_listen_data_ready; 280 write_unlock_bh(&sock->sk->sk_callback_lock); 281 282 if (isv6) { 283 sin6 = (struct sockaddr_in6 *)&ss; 284 sin6->sin6_family = PF_INET6; 285 sin6->sin6_addr = in6addr_any; 286 sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT); 287 sin6->sin6_scope_id = 0; 288 sin6->sin6_flowinfo = 0; 289 addr_len = sizeof(*sin6); 290 } else { 291 sin = (struct sockaddr_in *)&ss; 292 sin->sin_family = PF_INET; 293 sin->sin_addr.s_addr = INADDR_ANY; 294 sin->sin_port = (__force u16)htons(RDS_TCP_PORT); 295 addr_len = sizeof(*sin); 296 } 297 298 ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len); 299 if (ret < 0) { 300 rdsdebug("could not bind %s listener socket: %d\n", 301 isv6 ? "IPv6" : "IPv4", ret); 302 goto out; 303 } 304 305 ret = sock->ops->listen(sock, 64); 306 if (ret < 0) 307 goto out; 308 309 return sock; 310 out: 311 if (sock) 312 sock_release(sock); 313 return NULL; 314 } 315 316 void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor) 317 { 318 struct sock *sk; 319 320 if (!sock) 321 return; 322 323 sk = sock->sk; 324 325 /* serialize with and prevent further callbacks */ 326 lock_sock(sk); 327 write_lock_bh(&sk->sk_callback_lock); 328 if (sk->sk_user_data) { 329 sk->sk_data_ready = sk->sk_user_data; 330 sk->sk_user_data = NULL; 331 } 332 write_unlock_bh(&sk->sk_callback_lock); 333 release_sock(sk); 334 335 /* wait for accepts to stop and close the socket */ 336 flush_workqueue(rds_wq); 337 flush_work(acceptor); 338 sock_release(sock); 339 } 340