/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * 		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * Its minimum value is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);
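/* This knob is exposed to userspace as net.ipv4.tcp_max_syn_backlog
 * (registered in net/ipv4/sysctl_net_ipv4.c), so a typical way to raise
 * it on a busy server is:
 *
 *	sysctl -w net.ipv4.tcp_max_syn_backlog=1024
 */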

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

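	/* Prefer a cheap physically contiguous allocation (no OOM warning,
	 * no retries) while the table still fits below the costly page
	 * order; fall back to vzalloc() for the larger tables.
	 */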
	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, GFP_KERNEL |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = vzalloc(lopt_size);
	if (!lopt)
		return -ENOMEM;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	spin_lock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

	spin_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	spin_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
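
/*
 * Worked example of the sizing above: with the default
 * sysctl_max_syn_backlog of 256, a listen() backlog of 1024 gives
 *
 *	min(1024, 256)		-> 256
 *	max(256, 8)		-> 256
 *	roundup_pow_of_two(257)	-> 512 hash buckets, max_qlen_log = 9
 *
 * so at most 512 table slots are allocated no matter how large a backlog
 * the caller requests, and lopt_size is sizeof(struct listen_sock) plus
 * 512 request_sock pointers.
 */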

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* This is an error recovery path only, no locking needed */
	kvfree(queue->listen_opt);
}

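/* Detach the listen_sock from the queue under syn_wait_lock, making the
 * caller its exclusive owner: any reader that takes the lock afterwards
 * observes listen_opt == NULL.
 */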
static inline struct listen_sock *reqsk_queue_yank_listen_sk(
		struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	spin_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	spin_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* make the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);

	if (listen_sock_qlen(lopt) != 0) {
		unsigned int i;

		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			spin_lock_bh(&queue->syn_wait_lock);
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				atomic_inc(&lopt->qlen_dec);
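				/* A pending req holds one reference for the
				 * hash table and one for its timer; if
				 * del_timer() deactivated a pending timer,
				 * drop the timer's reference here too.
				 */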
				if (del_timer(&req->rsk_timer))
					reqsk_put(req);
				reqsk_put(req);
			}
			spin_unlock_bh(&queue->syn_wait_lock);
		}
	}

	if (WARN_ON(listen_sock_qlen(lopt) != 0))
		pr_err("qlen %u\n", listen_sock_qlen(lopt));
	kvfree(lopt);
}
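
/*
 * For reference, a sketch of the qlen accounting used above (the real
 * definition lives in include/net/request_sock.h): the queue length is the
 * difference of two counters, which is why the loop above bumps qlen_dec
 * instead of decrementing a single counter that the SYN-ACK timer could
 * race with:
 *
 *	static inline int listen_sock_qlen(const struct listen_sock *lopt)
 *	{
 *		return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
 *	}
 */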

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after 3WHS has been either completed or aborted (e.g.,
 * RST is received).
 *
 * Before TFO, a child socket is created only after 3WHS is completed,
 * hence it never needs to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until 3WHS is either completed or aborted. Afterwards the req will stay
 * around until either the child socket is accepted or, in the rare case,
 * until the listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH 3WHS has completed
 * (or aborted) and the child socket has been accepted (or listener closed).
 * When a child socket is accepted, its corresponding req->sk is set to
 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
 * will be used by the code below to determine if a child socket has been
 * accepted or not, and the check is protected by the fastopenq->lock
 * described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is used.
 * Only in the rare case when both the listener and the child locks are
 * held, e.g., in inet_csk_listen_stop(), do we not need to acquire the
 * lock. The lock also protects other fields such as fastopenq->qlen, which
 * is decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the listener's existing
 * socket lock. But first, that lock is difficult to use here. It is not
 * a simple spin lock; one must consider sock_owned_by_user() and arrange
 * to use the sk_add_backlog() machinery. But what really makes it
 * infeasible is the locking-hierarchy violation: e.g., inet_csk_listen_stop()
 * may try to acquire a child's lock while holding the listener's socket
 * lock. A corner case in tcp_v4_hnd_req() might also trigger this locking
 * order.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener, so it is also protected by
 * the fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = req->rsk_listener;
	struct fastopen_queue *fastopenq;

	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;

	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->tfo_listener = false;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed, don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		reqsk_put(req);
		return;
	}
	/* Wait for 60 secs before removing a req that has triggered an RST.
	 * This is a simple defense against a TFO spoofing attack: count the
	 * req against fastopen.max_qlen, and disable TFO when the qlen
	 * exceeds max_qlen.
	 *
	 * For more details see the CoNEXT'11 "TCP Fast Open" paper.
	 */
	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
}
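
/*
 * Typical call site (condensed sketch; see tcp_rcv_state_process() in
 * net/ipv4/tcp_input.c for the real code): once the child leaves
 * TCP_SYN_RECV, the req is released through this helper rather than a bare
 * reqsk_put(), so the fastopen queue accounting above stays consistent:
 *
 *	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 *
 *	if (req)	// 3WHS completed, child keeps no req reference
 *		reqsk_fastopen_remove(sk, req, false);
 */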
207