xref: /openbmc/linux/net/tls/tls_device.c (revision 9b93eb47)
/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	kfree(ctx);
}

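/* Runs from the system workqueue. Contexts are first spliced off the
 * global gc list under tls_device_lock, then torn down with the lock
 * dropped so that the driver's tls_dev_del() callback is not invoked
 * in atomic context.
 */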
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

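/* ACK-clean callback, registered via clean_acked_data_enable() in
 * tls_set_device_offload(). TCP calls it as the ACKed sequence number
 * advances: every record whose end_seq is now acked is freed, and
 * unacked_record_sn advances by the number of records dropped.
 */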
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

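/* Append @size bytes from @pfrag to the open record: extend the last
 * fragment in place when the new chunk is contiguous with it in the
 * same page, otherwise start a new fragment and take an extra page
 * reference.
 */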
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

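/* Close the open record: write the TLS header into the first fragment,
 * append a dummy authentication tag, link the record into the list
 * consulted by tls_get_record(), and push the fragments to TCP as a
 * scatterlist via tls_push_sg().
 */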
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 ctx->crypto_send.info.version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

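/* Make sure there is an open record and room for payload: open a new
 * record sized for the TLS header if none exists, then refill the
 * socket's page frag for the payload that follows.
 */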
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

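/* Main TX loop: copy payload from @msg_iter into page frags attached
 * to the open record, closing and pushing a record whenever it reaches
 * max_open_record_len or runs out of fragment slots. With MSG_MORE or
 * MSG_SENDPAGE_NOTLAST, a partially filled record is left open for the
 * data still to come.
 */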
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					       pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter	msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

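/* Find the record containing TCP sequence number @seq and return its
 * record sequence number through @p_record_sn; exported for drivers
 * reconstructing the TLS state of retransmitted segments.
 * retransmit_hint caches the last hit, so retransmissions, which tend
 * to be close together, rarely rescan the list from the start.
 */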
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant, start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter	msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

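/* Write-space callback: retry pushing a record that was only partially
 * handed to TCP. GFP_ATOMIC is used here, presumably because the
 * write-space path may run in a context where sleeping allocations are
 * not allowed.
 */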
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
		sk->sk_allocation = sk_allocation;
	}
}

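/* RX resync: the device posts a resync request in rx_ctx->resync_req
 * (nonzero means pending; the upper 32 bits carry a TCP sequence
 * number). Once the SW path reaches a record header at the requested
 * sequence, the request is claimed with cmpxchg and the device is told
 * the record sequence number that goes with it.
 */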
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev = tls_ctx->netdev;
	struct tls_offload_context_rx *rx_ctx;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	resync_req = atomic64_read(&rx_ctx->resync_req);
	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
	is_req_pending = resync_req;

	if (unlikely(is_req_pending) && req_seq == seq &&
	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
						      seq + TLS_HEADER_SIZE - 1,
						      rcd_sn);
}

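/* SW fallback for a record the device decrypted only partially.
 * AES-GCM encrypts with a CTR keystream, so running decrypt_skb() over
 * the mixed record XORs the keystream over every byte: regions that
 * were still ciphertext come out as plaintext, and regions the device
 * already decrypted come out re-encrypted. The re-encrypted bytes are
 * copied back over the skb regions marked ->decrypted, leaving a fully
 * encrypted record for the regular SW decrypt path; the -EBADMSG from
 * the failed tag check is therefore expected.
 */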
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	skb_copy_bits(skb, offset, buf,
		      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);

	/* We are interested only in the decrypted data, not the auth tag. */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted)
			skb_store_bits(skb, offset, buf, copy);

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted)
			skb_store_bits(skb_iter, frag_pos, buf, copy);

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Skip if it is already decrypted */
	if (ctx->sw.decrypted)
		return 0;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	return (is_encrypted || is_decrypted) ? 0 :
		tls_device_reencrypt(sk, skb);
}

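/* Hook the socket destructor and publish the context on
 * tls_device_list so tls_device_down() can find it; the sk_destruct
 * check makes this idempotent when both directions are offloaded.
 */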
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

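/* Set up TLS_HW TX offload: build the TX offload context, register the
 * ACK-clean callback, seed the record list with a zero-length start
 * marker (so sequence-number accounting has a well-defined origin),
 * and install the crypto state in the device via tls_dev_add() under
 * the read side of device_offload_lock.
 */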
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* Following this assignment, tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
				   __func__);
		goto out;
	}

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

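/* NETDEV_DOWN handler. With new offload attempts blocked by the write
 * side of device_offload_lock, detach every context bound to this
 * netdev, remove its HW state via tls_dev_del() and drop the device
 * reference. The final flush_work() makes sure a concurrently
 * scheduled gc pass has finished before the notifier returns.
 */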
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		ctx->netdev = NULL;
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

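/* The checks below encode what a driver must provide before offload is
 * accepted. A minimal sketch (the mydrv_* names are hypothetical; see
 * struct tlsdev_ops in include/net/tls.h):
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	   = mydrv_tls_add,
 *		.tls_dev_del	   = mydrv_tls_del,
 *		.tls_dev_resync_rx = mydrv_tls_resync_rx, // required for RX
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */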
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		/* Check tlsdev_ops itself before dereferencing its members;
		 * a device may advertise the features without any ops.
		 */
		if (!dev->tlsdev_ops ||
		    !dev->tlsdev_ops->tls_dev_add ||
		    !dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_BAD;

		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync_rx)
			return NOTIFY_BAD;

		return NOTIFY_DONE;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call	= tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}