#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

void tcp_fastopen_init_key_once(bool publish)
{
	static u8 key[TCP_FASTOPEN_KEY_LENGTH];

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	if (net_get_random_once(key, sizeof(key)) && publish)
		tcp_fastopen_reset_cipher(key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

static bool __tcp_fastopen_cookie_gen(const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
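 *
 * As a quick sketch of the flow implemented below (editor's illustration of
 * tcp_fastopen_cookie_gen(), not an additional requirement):
 *
 *	IPv4:	cookie = AES128_K({saddr, daddr, 0, 0})
 *	IPv6:	tmp    = AES128_K(saddr)
 *		cookie = AES128_K(tmp XOR daddr)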
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(buf, foc);
		}
	}
#endif
	return false;
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct dst_entry *dst,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
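	 *
	 * Note (editor's addition): no reqsk timer is armed for a TFO
	 * request, so SYNACK retransmits are presumably driven by this
	 * child timer instead; see tcp_fastopen_synack_timer() in
	 * net/ipv4/tcp_timer.c.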
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      struct dst_entry *dst)
{
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
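		 *
		 * (Editor's note: i.e. the child is created even for a
		 * data-less SYN carrying a valid cookie, so the server
		 * application may write data before the handshake's final
		 * ACK arrives, as RFC 7413 permits.)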
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, dst, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 *	1. fastopen_connect sockopt is set
 *	2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *		 return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *	1. client side TFO socket receives out of order FIN
 *	2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
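 *
 * Concretely (editor's restatement of tcp_fastopen_active_should_disable()
 * below, with the default 1hr sysctl): the Nth consecutive disable lasts
 * min(2^(N-1), 64) hours, i.e. 1h, 2h, 4h, ... capped at 64h.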
 */

/* Default to 1hr */
unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
static unsigned long tfo_active_disable_stamp __read_mostly;

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	atomic_inc(&tfo_active_disable_times);
	tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Reset tfo_active_disable_times to 0 */
void tcp_fastopen_active_timeout_reset(void)
{
	atomic_set(&tfo_active_disable_times, 0);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	int tfo_da_times = atomic_read(&tfo_active_disable_times);
	int multiplier;
	unsigned long timeout;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
	if (time_before(jiffies, tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct rb_node *p;
	struct sk_buff *skb;
	struct dst_entry *dst;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		p = rb_first(&tp->out_of_order_queue);
		if (p && !rb_next(p)) {
			skb = rb_entry(p, struct sk_buff, rbnode);
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			tcp_fastopen_active_timeout_reset();
		dst_release(dst);
	}
}
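
/* Usage sketch (editor's illustration, not part of the kernel build): the
 * passive path above is reached from a listener that opted in via the
 * TCP_FASTOPEN sockopt, while the active and deferred-connect paths map to
 * MSG_FASTOPEN and the TCP_FASTOPEN_CONNECT sockopt. Roughly, from userspace:
 *
 *	server:	int qlen = 16;	(max pending TFO requests, fastopenq.max_qlen)
 *		setsockopt(lfd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 *	client:	sendto(fd, buf, len, MSG_FASTOPEN,
 *		       (struct sockaddr *)&daddr, sizeof(daddr));
 *	or:	int one = 1;
 *		setsockopt(fd, SOL_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *		connect(fd, ...);	(SYN deferred, see tcp_fastopen_defer_connect())
 *		send(fd, buf, len, 0);	(data rides on the SYN if a cookie is cached)
 *
 * The exact flags and sockopts are standard UAPI; the values and flow shown
 * here are illustrative only.
 */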