/* net/ipv4/tcp_fastopen.c (revision 9eba9353) */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

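/* Lazily generate the per-netns Fast Open key the first time a cookie is
 * needed.  If a context is already published nothing is done; otherwise a
 * random key (TCP_FASTOPEN_KEY_LENGTH bytes) is installed via
 * tcp_fastopen_reset_cipher().
 */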
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
				lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

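/* Install a new Fast Open key.  With @sk set this updates the per-listener
 * context (q->ctx); with @sk == NULL it updates the per-netns context.  The
 * new context is published with rcu_assign_pointer() under
 * tcp_fastopen_ctx_lock, and any previous context is freed after an RCU
 * grace period.
 */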
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *key, unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
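
/* A sketch of how the per-listener path above is typically reached from
 * userspace (assuming the TCP_FASTOPEN_KEY socket option; the exact option
 * values live in the uapi headers, not in this file):
 *
 *	u8 key[16] = { ... };	// TCP_FASTOPEN_KEY_LENGTH bytes
 *	setsockopt(listen_fd, SOL_TCP, TCP_FASTOPEN_KEY, key, sizeof(key));
 *
 * The per-netns key can instead be written through the net.ipv4 fastopen key
 * sysctl, which ends up here with sk == NULL.
 */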

static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);

	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(sk, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(sk, buf, foc);
		}
	}
#endif
	return false;
}
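
/* Derivation sketch for the two paths above (see also RFC 7413):
 *
 *	IPv4:  cookie = AES128_K(saddr, daddr, 0, 0)
 *	IPv6:  tmp    = AES128_K(saddr)
 *	       cookie = AES128_K(tmp ^ daddr)	(a two-block CBC-MAC)
 *
 * Either way the cookie is TCP_FASTOPEN_COOKIE_SIZE bytes long.
 */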

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

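/* Create a full child socket directly from the TFO SYN: the request is
 * accounted in fastopenq, the retransmit timer is armed so the SYN-ACK can
 * be retransmitted, and any data carried in the SYN is queued via
 * tcp_fastopen_add_skb().  The caller (tcp_conn_request()) sends the SYN-ACK
 * and places the child on the accept queue.
 */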
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() sends the SYNACK and queues the child
	 * into the listener's accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Decides whether we should perform Fast Open on the SYN and, if so, returns
 * the created child socket (NULL otherwise). The cookie (foc) may be updated
 * and returned to the client in the SYN-ACK later, e.g. for a Fast Open
 * cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

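/* Client side: decide whether this connect may attempt Fast Open.  Bail out
 * on recurring SYN losses or while the blackhole detector (below) has active
 * TFO disabled; otherwise report whether a cached cookie exists, or allow a
 * cookie-less attempt when TFO_CLIENT_NO_COOKIE applies.
 */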
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	const struct dst_entry *dst;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
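
/* A minimal client-side usage sketch (assuming the TCP_FASTOPEN_CONNECT
 * socket option, which sets tp->fastopen_connect; error handling omitted):
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, addr, addrlen);	// SYN deferred if a cookie is cached
 *	send(fd, buf, len, 0);		// data can then go out in the SYN
 */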

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */
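
/* The base period is ipv4.sysctl_tcp_fastopen_blackhole_timeout (nominally
 * one hour, per the comment above); after N disable events the period is
 * min(2^(N-1), 64) * base, i.e. 1h, 2h, 4h, ... capped at 64 * base.
 */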

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}