/* net/ipv4/tcp_fastopen.c (xref revision 0f4630f3) */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

void tcp_fastopen_init_key_once(bool publish)
{
	static u8 key[TCP_FASTOPEN_KEY_LENGTH];

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	if (net_get_random_once(key, sizeof(key)) && publish)
		tcp_fastopen_reset_cipher(key, sizeof(key));
}

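/* A hedged illustration (hypothetical caller, not in this file):
 * net_get_random_once() fills @key exactly once system-wide and returns
 * true only for the call that performed the initialization, so
 *
 *	tcp_fastopen_init_key_once(true);
 *
 * publishes a fresh key at most once, while publish == false generates
 * the key material without installing a cipher context.
 */
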
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		kfree(ctx);
		return err;
	}

	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		kfree(ctx);
		return err;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	/* Free the old context only after all RCU readers are done with it. */
	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return 0;
}

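/* A hedged sketch (hypothetical caller, not in this file): the function
 * above is a classic RCU publish.  Writers serialize on
 * tcp_fastopen_ctx_lock and swap the pointer with rcu_assign_pointer();
 * readers such as __tcp_fastopen_cookie_gen() below pair with it via
 * rcu_dereference() under rcu_read_lock().  A control-path caller
 * installing a fresh random key might look like:
 *
 *	u8 key[TCP_FASTOPEN_KEY_LENGTH];
 *
 *	get_random_bytes(key, sizeof(key));
 *	err = tcp_fastopen_reset_cipher(key, sizeof(key));
 */
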
static bool __tcp_fastopen_cookie_gen(const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

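/* Contract note for the helper above (derived from the code): on
 * success, foc->val holds one AES block computed over @path and
 * foc->len is set to TCP_FASTOPEN_COOKIE_SIZE; it returns false only
 * when no key context has been published yet.
 */
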
/* Generate the fastopen cookie by applying AES-128 to both the source
 * and destination addresses.  Pad with zeros for IPv4 or
 * IPv4-mapped-IPv6 addresses.  For the longer IPv6 addresses use
 * CBC-MAC (see the worked illustration after this function).
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
			struct in6_addr *buf = (struct in6_addr *) tmp.val;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(buf, foc);
		}
	}
#endif
	return false;
}

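/* Worked illustration (comment only, derived from the code above): for
 * IPv6 the two __tcp_fastopen_cookie_gen() calls amount to a two-block
 * CBC-MAC over (saddr, daddr) under the published key K:
 *
 *	block  = AES_K(saddr)
 *	cookie = AES_K(block XOR daddr)
 *
 * For IPv4 a single block { saddr, daddr, 0, 0 } is encrypted, so in
 * both cases the cookie binds the address pair to the server's key.
 */
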
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct dst_entry *dst,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	u32 end_seq;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. We have to fix up some values to
	 * account for the child being a Fast Open socket that is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retransmit timer so that the SYNACK can be
	 * retransmitted. The request socket is not added to the ehash
	 * because it has been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	atomic_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_init_buffer_space(child);

	/* Queue the data carried in the SYN packet. We used to play
	 * tricky games with skb_get(). With a lockless listener, that is
	 * a dead end, so do not think about it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now (any
	 * reason not to?), but there is no need to queue the skb since
	 * there is no data. How about SYN+FIN?
	 *
	 * (See the sequence-number sketch after this function.)
	 */
	end_seq = TCP_SKB_CB(skb)->end_seq;
	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (likely(skb2)) {
			skb_dst_drop(skb2);
			__skb_pull(skb2, tcp_hdrlen(skb));
			skb_set_owner_r(skb2, child);
			__skb_queue_tail(&child->sk_receive_queue, skb2);
			tp->syn_data_acked = 1;

			/* u64_stats_update_begin(&tp->syncp) is not needed
			 * here, as we are certainly not changing the upper
			 * 32-bit value (0).
			 */
			tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
		} else {
			end_seq = TCP_SKB_CB(skb)->seq + 1;
		}
	}
	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into the listener accept queue.
	 */
	return child;
}

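/* Sequence-number sketch (illustration only, hypothetical numbers): a
 * SYN consumes one sequence number, so for a SYN carrying 100 bytes of
 * data with seq = 1000:
 *
 *	end_seq = 1000 + 1 + 100 = 1101
 *	tp->bytes_received = end_seq - seq - 1 = 100
 *	tp->rcv_nxt = end_seq = 1101
 *
 * A data-less SYN has end_seq == seq + 1, which is why nothing is
 * queued for it above.
 */
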
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and that we don't
	 * exceed the max # of pending TFO requests allowed, before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily (a userspace sketch follows this function).
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

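/* For context, a hedged userspace sketch (not part of this file): the
 * max_qlen checked above is what a server sets with the TCP_FASTOPEN
 * socket option before listen():
 *
 *	int qlen = 16;	(max pending, not-yet-accepted TFO requests)
 *
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, backlog);
 *
 * The TFO_SERVER_ENABLE bit of sysctl_tcp_fastopen
 * (net.ipv4.tcp_fastopen) must also be set for tcp_try_fastopen()
 * below to act on the SYN.
 */
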
/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. in response to a Fast Open cookie
 * request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      struct dst_entry *dst)
{
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* The cookie is valid. Create a (full) child socket to
		 * accept the data in the SYN before returning a SYN-ACK to
		 * ack the data. If we fail to create the socket, fall back
		 * and ack the ISN only but include the same cookie.
		 *
		 * Note: a data-less SYN with a valid cookie is allowed to
		 * send data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, dst, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
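
/* Closing illustration (hedged userspace sketch, not part of this
 * file): the client-side counterpart that exercises the paths above
 * sends data on the SYN with MSG_FASTOPEN:
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&srv, sizeof(srv));
 *
 * The first such call elicits a cookie (the foc->len == 0 path above);
 * subsequent calls present it, and a valid cookie takes the fastopen:
 * path that creates the child via tcp_fastopen_create_child().
 */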