xref: /openbmc/linux/net/rds/tcp_listen.c (revision c0e297dc)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

/*
 * cheesy, but simple..
 */
static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;

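/*
 * Enable TCP keepalive on an accepted socket so that a peer that has
 * silently gone away is detected and the connection torn down, rather
 * than lingering forever with no traffic.
 */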
static int rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */
	int keepalive = 1;
	int ret = 0;

	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&keepalive, sizeof(keepalive));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
				(char *)&keepcnt, sizeof(keepcnt));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
				(char *)&keepidle, sizeof(keepidle));
	if (ret < 0)
		goto bail;

	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
				(char *)&keepidle, sizeof(keepidle));
bail:
	return ret;
}

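/*
 * Accept one pending connection on the listen socket: create a child
 * socket, apply keepalive and tuning, and attach it to the RDS
 * connection for the peer addresses.  Returns 0 when a connection was
 * handled (including benign races that drop the conn) and a negative
 * errno when there is nothing left to accept or setup fails.
 */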
static int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp;

	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
			       sock->sk->sk_protocol, &new_sock);
	if (ret)
		goto out;

	new_sock->type = sock->type;
	new_sock->ops = sock->ops;
	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
	if (ret < 0)
		goto out;

	ret = rds_tcp_keepalive(new_sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

	rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
		 &inet->inet_saddr, ntohs(inet->inet_sport),
		 &inet->inet_daddr, ntohs(inet->inet_dport));

	conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
			       &rds_tcp_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 * We always create a new conn for listen side of TCP, and do not
	 * add it to the c_hash_list.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup
	 */
	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
	WARN_ON(!rs_tcp || rs_tcp->t_sock);

	/*
	 * see the comment above rds_queue_delayed_reconnect()
	 */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP)
			rds_tcp_stats_inc(s_tcp_listen_closed_stale);
		else
			rds_tcp_stats_inc(s_tcp_connect_raced);
		rds_conn_drop(conn);
		ret = 0;
		goto out;
	}

	rds_tcp_set_callbacks(new_sock, conn);
	rds_connect_complete(conn);
	new_sock = NULL;
	ret = 0;

out:
	if (new_sock)
		sock_release(new_sock);
	return ret;
}

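/*
 * Work function for rds_tcp_listen_work: drain the accept queue.
 * rds_tcp_accept_one() returns non-zero once there are no more
 * pending connections (or on error), which ends the loop.
 */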
static void rds_tcp_accept_worker(struct work_struct *work)
{
	while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
		cond_resched();
}

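/*
 * Installed as the listen socket's ->sk_data_ready callback by
 * rds_tcp_listen_init().  The original callback is stashed in
 * sk_user_data and is always invoked after we have (possibly)
 * queued the accept worker.
 */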
void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready.. we only want to queue listen work for our listening
	 * socket
	 */
	if (sk->sk_state == TCP_LISTEN)
		queue_work(rds_wq, &rds_tcp_listen_work);

out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk);
}

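/*
 * Create the IPv4 listen socket bound to RDS_TCP_PORT, hook its
 * data_ready callback (saving the original in sk_user_data) and start
 * listening for incoming connections.
 */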
int rds_tcp_listen_init(void)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		goto out;

	sock->sk->sk_reuse = SK_CAN_REUSE;
	rds_tcp_nonagle(sock);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	sin.sin_family = PF_INET;
	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
	sin.sin_port = (__force u16)htons(RDS_TCP_PORT);

	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (ret < 0)
		goto out;

	ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		goto out;

	rds_tcp_listen_sock = sock;
	sock = NULL;
out:
	if (sock)
		sock_release(sock);
	return ret;
}

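/*
 * Tear down the listen socket: restore the original data_ready callback
 * under the socket and callback locks so no new accept work is queued,
 * flush the workqueue so any in-flight accept finishes, then release
 * the socket.
 */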
void rds_tcp_listen_stop(void)
{
	struct socket *sock = rds_tcp_listen_sock;
	struct sock *sk;

	if (!sock)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	sock_release(sock);
	rds_tcp_listen_sock = NULL;
}