xref: /openbmc/linux/net/tls/tls_main.c (revision 62e59c4e)
/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
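
/*
 * Userspace enables kTLS on a connected TCP socket (fd) by attaching
 * this ULP and then installing the key material negotiated by the TLS
 * handshake. A minimal sketch, with the handshake itself and all error
 * handling omitted:
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 *
 *	crypto_info.info.version = TLS_1_2_VERSION;
 *	crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	(fill crypto_info.key/.iv/.salt/.rec_seq from the handshake)
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 *
 * After that, plain send()/write() on fd produce TLS records via
 * tls_sw_sendmsg() or the device offload paths below.
 */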

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);

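/*
 * tls_prots is indexed by [IP version][TX config][RX config]; point
 * sk->sk_prot at the variant matching this socket's current
 * configuration. The table itself is filled in by build_protos().
 */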
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

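/*
 * Push a record's scatterlist into the TCP layer with
 * do_tcp_sendpages(), starting first_offset bytes into the first
 * entry. If TCP stops accepting data (e.g. -EAGAIN), the unsent
 * remainder is parked in ctx->partially_sent_record/offset so that a
 * later tls_push_partial_record() resumes exactly where this stopped.
 */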
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

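/*
 * Userspace selects a non-application-data record type (e.g. a TLS
 * alert) by attaching a cmsg of level SOL_TLS and type
 * TLS_SET_RECORD_TYPE to sendmsg(). A minimal sketch, with the data
 * iovec and error handling omitted:
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = record_type;
 *	sendmsg(fd, &msg, 0);
 */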
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	sg = ctx->partially_sent_record;
	if (!sg)
		return false;

	while (1) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);

		if (sg_is_last(sg))
			break;
		sg++;
	}
	ctx->partially_sent_record = NULL;
	return true;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages is set, call the lower protocol write
	 * space handler to ensure we wake up any waiting operations
	 * there; for example, do_tcp_sendpages might be sleeping in
	 * sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
#ifdef CONFIG_TLS_DEVICE
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
#endif
	}

	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_resources_rx(sk);

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get the crypto info to hand back to userspace */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

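/*
 * Parse and install the crypto parameters for one direction (tx != 0
 * means TLS_TX). Device offload is attempted first where available,
 * with the pure software implementation as the fallback.
 */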
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are the same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
	case TLS_CIPHER_AES_GCM_256: {
		optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
			sizeof(struct tls12_crypto_info_aes_gcm_128) :
			sizeof(struct tls12_crypto_info_aes_gcm_256);
		if (optlen != optsize) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

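/*
 * Uses GFP_ATOMIC because tls_hw_prot() calls this while holding
 * device_spinlock.
 */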
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

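/*
 * Double-checked locking: rebuild the per-family proto table only if
 * the base TCP proto has changed since the last build, re-checking
 * under the mutex to serialize concurrent builders.
 */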
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	kfree(ctx);
	icsk->icsk_ulp_data = NULL;
}

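/*
 * Give a registered tls_device a chance to take the socket over
 * completely. On a match both directions run in TLS_HW_RECORD mode
 * and this returns 1, so tls_init() skips the usual setup.
 */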
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		  = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close		  = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read	= tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage		= tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage		= tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close	= tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);