/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

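/* Push one encrypted record, described by @sg, to the transport with
 * do_tcp_sendpages(). Short writes are retried from the updated offset; on
 * a transport error the current scatterlist entry and offset are saved in
 * the context as a partially sent record so that tls_push_partial_record()
 * can resume the push later.
 */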
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

int tls_push_pending_closed_record(struct sock *sk,
				   struct tls_context *tls_ctx,
				   int flags, long *timeo)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (tls_is_partially_sent_record(tls_ctx) ||
	    !list_empty(&ctx->tx_list))
		return tls_tx_records(sk, flags);
	else
		return tls_ctx->push_pending_record(sk, flags);
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* If in_tcp_sendpages, call the lower protocol write space handler
	 * to ensure we wake up any waiting operations there, for example
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
		/* Schedule the transmission */
		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
	}

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
	    (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

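/* For reference, a minimal userspace sketch of the sequence that exercises
 * do_tls_setsockopt_conf() above (and, for the control-record path,
 * tls_proccess_cmsg()). "fd" is an established TCP socket and the key
 * material is assumed to come from a userspace TLS handshake; error
 * handling is omitted:
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill crypto_info.key, .iv, .salt and .rec_seq from the handshake
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 *
 * Non-application-data records are sent with a TLS_SET_RECORD_TYPE cmsg,
 * e.g. record type 21 for an alert (alert/alert_len are placeholders for
 * the record payload):
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(unsigned char))];
 *	} u = {};
 *	struct iovec iov = { .iov_base = alert, .iov_len = alert_len };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*(unsigned char *)CMSG_DATA(cmsg) = 21;
 *	sendmsg(fd, &msg, 0);
 */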

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

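/* Populate the per-configuration proto table: the first index is the TX
 * configuration and the second the RX configuration (TLS_BASE, TLS_SW,
 * TLS_HW, TLS_HW_RECORD). Every entry starts from the base TCP proto and
 * overrides only the callbacks that configuration needs; update_sk_prot()
 * later picks the entry matching ctx->tx_conf and ctx->rx_conf.
 */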
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);