#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

void tcp_fastopen_init_key_once(bool publish)
{
	static u8 key[TCP_FASTOPEN_KEY_LENGTH];

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	if (net_get_random_once(key, sizeof(key)) && publish)
		tcp_fastopen_reset_cipher(key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

static bool __tcp_fastopen_cookie_gen(const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	tcp_fastopen_init_key_once(true);

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}
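/* Illustrative sketch, not part of the build: the construction used by
 * tcp_fastopen_cookie_gen() below is a single AES-128 block for IPv4
 * (addresses padded with zeros) and a two-block CBC-MAC for IPv6, i.e.
 * cookie = AES_K(AES_K(saddr) ^ daddr). A hypothetical userspace
 * equivalent using the OpenSSL AES primitives could look like this; the
 * kernel takes the leading TCP_FASTOPEN_COOKIE_SIZE bytes of the
 * resulting block as the cookie.
 */
#if 0
#include <openssl/aes.h>

/* "key" is the 16-byte server key published by tcp_fastopen_reset_cipher() */
static void tfo_cookie_v6_sketch(const unsigned char key[16],
				 const unsigned char saddr[16],
				 const unsigned char daddr[16],
				 unsigned char cookie[16])
{
	AES_KEY aes;
	unsigned char tmp[16];
	int i;

	AES_set_encrypt_key(key, 128, &aes);
	AES_encrypt(saddr, tmp, &aes);	/* first CBC-MAC block */
	for (i = 0; i < 16; i++)	/* chain the destination address in */
		tmp[i] ^= daddr[i];
	AES_encrypt(tmp, cookie, &aes);	/* second block yields the cookie */
}
#endif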
/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
			struct in6_addr *buf = (struct in6_addr *) tmp.val;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(buf, foc);
		}
	}
#endif
	return false;
}
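/* Illustrative sketch, not part of the build: how a client exercises the
 * SYN-with-data path that tcp_fastopen_create_child() below consumes.
 * sendto() with MSG_FASTOPEN performs the implicit connect; once a cookie
 * is cached from an earlier connection, the payload rides in the SYN.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN	0x20000000
#endif

static int tfo_client_send_sketch(const struct sockaddr_in *srv,
				  const void *buf, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Without a cached cookie this sends a cookie-requesting SYN and
	 * defers the data; with one, the data is carried in the SYN itself.
	 */
	if (sendto(fd, buf, len, MSG_FASTOPEN,
		   (const struct sockaddr *)srv, sizeof(*srv)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif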
static bool tcp_fastopen_create_child(struct sock *sk,
				      struct sk_buff *skb,
				      struct dst_entry *dst,
				      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	u32 end_seq;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		return false;

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account that the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_init_buffer_space(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 * Note that IPv6 might also have used the skb_get() trick
	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts).
	 * So we need to eventually get a clone of the packet before
	 * inserting it in sk_receive_queue.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now,
	 * (any reason not to?) but no need to queue the skb since
	 * there is no data. How about SYN+FIN?
	 */
	end_seq = TCP_SKB_CB(skb)->end_seq;
	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
		struct sk_buff *skb2;

		if (unlikely(skb_shared(skb)))
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else
			skb2 = skb_get(skb);

		if (likely(skb2)) {
			skb_dst_drop(skb2);
			__skb_pull(skb2, tcp_hdrlen(skb));
			skb_set_owner_r(skb2, child);
			__skb_queue_tail(&child->sk_receive_queue, skb2);
			tp->syn_data_acked = 1;
		} else {
			end_seq = TCP_SKB_CB(skb)->seq + 1;
		}
	}
	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
	sk->sk_data_ready(sk);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return true;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	return true;
}
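/* Note on the sysctl_tcp_fastopen bitmap tested in tcp_try_fastopen()
 * below (bit values from include/net/tcp.h in this kernel vintage; the
 * list is not exhaustive):
 *   TFO_CLIENT_ENABLE          0x1   - clients may send data in SYN
 *   TFO_SERVER_ENABLE          0x2   - listeners may accept data in SYN
 *   TFO_SERVER_COOKIE_NOT_REQD 0x200 - accept SYN data without requiring
 *                                      a valid cookie
 * e.g. net.ipv4.tcp_fastopen=3 enables both the client and server paths.
 */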
/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g. for
 * a Fast Open cookie request (foc->len == 0).
 */
bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
		      struct request_sock *req,
		      struct tcp_fastopen_cookie *foc,
		      struct dst_entry *dst)
{
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return false;
	}

	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: a data-less SYN with a valid cookie is allowed to
		 * send data in SYN_RECV state.
		 */
fastopen:
		if (tcp_fastopen_create_child(sk, skb, dst, req)) {
			foc->len = -1;
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVE);
			return true;
		}
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	*foc = valid_foc;
	return false;
}
EXPORT_SYMBOL(tcp_try_fastopen);
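/* Illustrative sketch, not part of the build: the listener-side setup that
 * makes tcp_try_fastopen() reachable. setsockopt(TCP_FASTOPEN) sets the
 * fastopenq->max_qlen limit enforced by tcp_fastopen_queue_check() above,
 * and server support must also be enabled in the net.ipv4.tcp_fastopen
 * sysctl (TFO_SERVER_ENABLE).
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN	23
#endif

static int tfo_listen_sketch(unsigned short port)
{
	struct sockaddr_in addr = {
		.sin_family		= AF_INET,
		.sin_port		= htons(port),
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	int qlen = 16;	/* becomes fastopenq->max_qlen */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif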