/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

	/* Clamp the table size to [8, sysctl_max_syn_backlog] and round up
	 * to a power of two so the hash can mask instead of divide.
	 */
	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

	/* Try a cheap physically contiguous allocation first, and fall
	 * back to vzalloc() for large tables.
	 */
	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, GFP_KERNEL |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = vzalloc(lopt_size);
	if (!lopt)
		return -ENOMEM;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	spin_lock_init(&queue->syn_wait_lock);

	spin_lock_init(&queue->fastopenq.lock);
	queue->fastopenq.rskq_rst_head = NULL;
	queue->fastopenq.rskq_rst_tail = NULL;
	queue->fastopenq.qlen = 0;
	queue->fastopenq.max_qlen = 0;

	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

	spin_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	spin_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
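
/* Worked example of the sizing above (illustrative only, not part of the
 * build): a listen() backlog of 1000 with the default
 * sysctl_max_syn_backlog of 256 gives
 *
 *	nr_table_entries = min_t(u32, 1000, 256);		-> 256
 *	nr_table_entries = max_t(u32, 256, 8);			-> 256
 *	nr_table_entries = roundup_pow_of_two(256 + 1);		-> 512
 *	lopt->max_qlen_log = ilog2(512);			-> 9
 *
 * so lopt_size is sizeof(struct listen_sock) plus 512 pointers, which on
 * typical configurations is well under PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER
 * and is therefore served by kzalloc() rather than vzalloc().
 */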

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* This is an error recovery path only, no locking needed */
	kvfree(queue->listen_opt);
}

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
	struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	spin_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	spin_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* Make all the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);

	if (listen_sock_qlen(lopt) != 0) {
		unsigned int i;

		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			spin_lock_bh(&queue->syn_wait_lock);
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				/* Because of the following del_timer_sync(),
				 * we must release the spinlock here
				 * or risk a deadlock.
				 */
				spin_unlock_bh(&queue->syn_wait_lock);
				/* Bumping qlen_dec decrements the effective
				 * queue length (qlen_inc - qlen_dec).
				 */
				atomic_inc(&lopt->qlen_dec);
				if (del_timer_sync(&req->rsk_timer))
					reqsk_put(req);
				reqsk_put(req);
				spin_lock_bh(&queue->syn_wait_lock);
			}
			spin_unlock_bh(&queue->syn_wait_lock);
		}
	}

	if (WARN_ON(listen_sock_qlen(lopt) != 0))
		pr_err("qlen %u\n", listen_sock_qlen(lopt));
	kvfree(lopt);
}
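
/* Reference-count sketch for the teardown loop above (an informal reading
 * of this file's rules, not extra code): each pending req is held once by
 * the SYN table and once by its rsk_timer, so unhashing plus a successful
 * timer cancellation must drop two references:
 *
 *	if (del_timer_sync(&req->rsk_timer))
 *		reqsk_put(req);			timer's reference
 *	reqsk_put(req);				SYN table's reference
 *
 * When del_timer_sync() returns 0, the timer was already running and the
 * timer handler is expected to drop its own reference instead.
 */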

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after the 3WHS has been either completed or aborted
 * (e.g., an RST is received).
 *
 * Before TFO, a child socket was created only after the 3WHS completed,
 * hence it never needed to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until the 3WHS is either completed or aborted. Afterwards the req will
 * stay until either the child socket is accepted, or in the rare case when
 * the listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH the 3WHS has
 * completed (or aborted) and the child socket has been accepted (or the
 * listener closed). When a child socket is accepted, its corresponding
 * req->sk is set to NULL since it's no longer needed. More importantly,
 * "req->sk == NULL" is what the code below uses to determine whether a
 * child socket has been accepted, and the check is protected by the
 * fastopenq->lock described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is created.
 * Only in the rare case when both the listener and the child locks are
 * held, e.g., in inet_csk_listen_stop(), do we not need to acquire the
 * lock. The lock also protects other fields such as fastopenq->qlen, which
 * is decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution would have been to simply use the listener's
 * existing socket lock. But that lock is difficult to use here: it is not
 * a simple spin lock - one must consider sock_owned_by_user() and arrange
 * to use the sk_add_backlog() machinery. What really makes it infeasible,
 * though, is the locking-hierarchy violation: e.g., inet_csk_listen_stop()
 * may try to acquire a child's lock while holding the listener's socket
 * lock, and a corner case in tcp_v4_hnd_req() might also trigger this
 * locking order.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener, so it is protected by the
 * fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = req->rsk_listener;
	struct fastopen_queue *fastopenq;

	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;

	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->tfo_listener = false;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed, don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		reqsk_put(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered an
	 * RST. This is a simple defense against a TFO spoofing attack:
	 * count the req against fastopenq->max_qlen, and disable TFO when
	 * the qlen exceeds max_qlen.
	 *
	 * For more details see the CoNEXT '11 "TCP Fast Open" paper.
	 */
	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
}
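
/* Illustrative call site (a sketch under assumed conditions, not code from
 * this file): the TCP child calls this with its socket lock held once the
 * 3WHS finishes or is aborted, roughly:
 *
 *	struct request_sock *req = tcp_sk(child)->fastopen_rsk;
 *
 *	if (req)
 *		reqsk_fastopen_remove(child, req, rst_received);
 *
 * where "child" and "rst_received" are placeholders for the caller's
 * socket and for whether an RST aborted the handshake.
 */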