/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

int tls_push_pending_closed_record(struct sock *sk,
				   struct tls_context *tls_ctx,
				   int flags, long *timeo)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (tls_is_partially_sent_record(tls_ctx) ||
	    !list_empty(&ctx->tx_list))
		return tls_tx_records(sk, flags);
	else
		return tls_ctx->push_pending_record(sk, flags);
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* If in_tcp_sendpages, call the lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example,
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
229 */ 230 if (ctx->in_tcp_sendpages) { 231 ctx->sk_write_space(sk); 232 return; 233 } 234 235 /* Schedule the transmission if tx list is ready */ 236 if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) { 237 /* Schedule the transmission */ 238 if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask)) 239 schedule_delayed_work(&tx_ctx->tx_work.work, 0); 240 } 241 242 ctx->sk_write_space(sk); 243 } 244 245 static void tls_ctx_free(struct tls_context *ctx) 246 { 247 if (!ctx) 248 return; 249 250 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); 251 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); 252 kfree(ctx); 253 } 254 255 static void tls_sk_proto_close(struct sock *sk, long timeout) 256 { 257 struct tls_context *ctx = tls_get_ctx(sk); 258 long timeo = sock_sndtimeo(sk, 0); 259 void (*sk_proto_close)(struct sock *sk, long timeout); 260 bool free_ctx = false; 261 262 lock_sock(sk); 263 sk_proto_close = ctx->sk_proto_close; 264 265 if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) || 266 (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) { 267 free_ctx = true; 268 goto skip_tx_cleanup; 269 } 270 271 if (!tls_complete_pending_work(sk, ctx, 0, &timeo)) 272 tls_handle_open_record(sk, 0); 273 274 /* We need these for tls_sw_fallback handling of other packets */ 275 if (ctx->tx_conf == TLS_SW) { 276 kfree(ctx->tx.rec_seq); 277 kfree(ctx->tx.iv); 278 tls_sw_free_resources_tx(sk); 279 } 280 281 if (ctx->rx_conf == TLS_SW) { 282 kfree(ctx->rx.rec_seq); 283 kfree(ctx->rx.iv); 284 tls_sw_free_resources_rx(sk); 285 } 286 287 #ifdef CONFIG_TLS_DEVICE 288 if (ctx->rx_conf == TLS_HW) 289 tls_device_offload_cleanup_rx(sk); 290 291 if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) { 292 #else 293 { 294 #endif 295 tls_ctx_free(ctx); 296 ctx = NULL; 297 } 298 299 skip_tx_cleanup: 300 release_sock(sk); 301 sk_proto_close(sk, timeout); 302 /* free ctx for TLS_HW_RECORD, used by tcp_set_state 303 * for sk->sk_prot->unhash [tls_hw_unhash] 304 */ 305 if (free_ctx) 306 tls_ctx_free(ctx); 307 } 308 309 static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, 310 int __user *optlen) 311 { 312 int rc = 0; 313 struct tls_context *ctx = tls_get_ctx(sk); 314 struct tls_crypto_info *crypto_info; 315 int len; 316 317 if (get_user(len, optlen)) 318 return -EFAULT; 319 320 if (!optval || (len < sizeof(*crypto_info))) { 321 rc = -EINVAL; 322 goto out; 323 } 324 325 if (!ctx) { 326 rc = -EBUSY; 327 goto out; 328 } 329 330 /* get user crypto info */ 331 crypto_info = &ctx->crypto_send.info; 332 333 if (!TLS_CRYPTO_INFO_READY(crypto_info)) { 334 rc = -EBUSY; 335 goto out; 336 } 337 338 if (len == sizeof(*crypto_info)) { 339 if (copy_to_user(optval, crypto_info, sizeof(*crypto_info))) 340 rc = -EFAULT; 341 goto out; 342 } 343 344 switch (crypto_info->cipher_type) { 345 case TLS_CIPHER_AES_GCM_128: { 346 struct tls12_crypto_info_aes_gcm_128 * 347 crypto_info_aes_gcm_128 = 348 container_of(crypto_info, 349 struct tls12_crypto_info_aes_gcm_128, 350 info); 351 352 if (len != sizeof(*crypto_info_aes_gcm_128)) { 353 rc = -EINVAL; 354 goto out; 355 } 356 lock_sock(sk); 357 memcpy(crypto_info_aes_gcm_128->iv, 358 ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 359 TLS_CIPHER_AES_GCM_128_IV_SIZE); 360 memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq, 361 TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); 362 release_sock(sk); 363 if (copy_to_user(optval, 364 crypto_info_aes_gcm_128, 365 sizeof(*crypto_info_aes_gcm_128))) 366 rc = -EFAULT; 367 break; 368 } 369 default: 370 rc 
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting crypto info more than one time */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	return ctx;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	mutex_unlock(&device_mutex);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash)
			dev->unhash(dev, sk);
	}
	mutex_unlock(&device_mutex);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash)
			err |= dev->hash(dev, sk);
	}
	mutex_unlock(&device_mutex);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;

	/* Rebuild the IPv6 TLS protos whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_add_tail(&device->dev_list, &device_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_del(&device->dev_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	build_protos(tls_prots[TLSV4], &tcp_prot);

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);
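
/* Usage sketch (illustrative only, not part of this module): how userspace
 * attaches an established TCP socket to the "tls" ULP registered above and
 * installs TX key material via the TLS_TX setsockopt path handled by
 * do_tls_setsockopt_conf(). Based on Documentation/networking/tls.txt; the
 * fd, key, iv, salt and rec_seq arguments are placeholders taken from a
 * completed TLS 1.2 handshake, and error handling is reduced to a minimum.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/tls.h>
 *
 *	#ifndef SOL_TLS
 *	#define SOL_TLS 282
 *	#endif
 *
 *	static int enable_ktls_tx(int fd, const unsigned char *key,
 *				  const unsigned char *iv,
 *				  const unsigned char *salt,
 *				  const unsigned char *rec_seq)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci;
 *
 *		// hand the established socket over to the "tls" ULP (tls_init)
 *		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *
 *		memset(&ci, 0, sizeof(ci));
 *		ci.info.version = TLS_1_2_VERSION;
 *		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *		// installs software (or device-offloaded) TX crypto state
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 */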
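
/* A second illustrative sketch (same headers as above): sending a non-data
 * record, here a TLS alert (content type 21), through the TLS_SET_RECORD_TYPE
 * cmsg consumed by tls_proccess_cmsg(). Also based on
 * Documentation/networking/tls.txt; fd, buf and len are placeholders. Note
 * that MSG_MORE is rejected when a record type is supplied.
 *
 *	static ssize_t send_tls_record(int fd, const void *buf, size_t len,
 *				       unsigned char record_type)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(record_type))];
 *		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 *		struct msghdr msg = { 0 };
 *		struct cmsghdr *cmsg;
 *
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_TLS;
 *		cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
 *		*CMSG_DATA(cmsg) = record_type;
 *		msg.msg_controllen = cmsg->cmsg_len;
 *
 *		return sendmsg(fd, &msg, 0);
 *	}
 */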