/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);
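/* tls_prots is indexed first by address family, then by the TX and RX
 * configurations (TLS_BASE, TLS_SW, TLS_HW, TLS_HW_RECORD), so every
 * combination of offload modes resolves to a dedicated struct proto
 * filled in by build_protos(). Swapping sk->sk_prot to the matching
 * entry is how this ULP interposes on the socket calls.
 */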
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			/* Stash the unsent remainder so that
			 * tls_push_partial_record() can resume from here.
			 */
			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			/* Flush any open record before switching types */
			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
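/* Illustrative userspace counterpart of the cmsg parsed above, adapted
 * from Documentation/networking/tls.txt; the variable names here are
 * hypothetical, not part of this module. Sending a TLS alert record
 * (ContentType 21) instead of application data:
 *
 *	struct msghdr msg = { 0 };
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = alert, .iov_len = 2 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = 21;
 *	sendmsg(fd, &msg, 0);
 */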
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

int tls_push_pending_closed_record(struct sock *sk,
				   struct tls_context *tls_ctx,
				   int flags, long *timeo)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (tls_is_partially_sent_record(tls_ctx) ||
	    !list_empty(&ctx->tx_list))
		return tls_tx_records(sk, flags);
	else
		return tls_ctx->push_pending_record(sk, flags);
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* If in_tcp_sendpages, call the lower protocol write space
	 * handler to ensure we wake up any waiting operations there.
	 * For example, if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

	/* Schedule the transmission if the tx list is ready */
	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
	}

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	/* Scrub key material before freeing */
	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}
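/* Illustrative userspace read-back of the TX state handled below (the
 * caller and its variables are hypothetical, not part of this module):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci;
 *	socklen_t len = sizeof(ci);
 *
 *	getsockopt(fd, SOL_TLS, TLS_TX, &ci, &len);
 *
 * A short query with len == sizeof(struct tls_crypto_info) returns
 * just the version and cipher_type; the full length also copies out
 * the current IV and record sequence number. Note that only TLS_TX is
 * supported here: do_tls_getsockopt() rejects everything else with
 * -ENOPROTOOPT.
 */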
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_gcm_128 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_128,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting crypto info more than one time */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		/* Copy the cipher-specific tail (key, IV, salt, rec_seq)
		 * that follows the generic header.
		 */
		rc = copy_from_user(crypto_info + 1,
				    optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Prefer device offload when available; fall back to the SW path */
	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
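/* Illustrative userspace setup of the path above, following
 * Documentation/networking/tls.txt; the variable names are
 * hypothetical:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *	ci.info.version = TLS_1_2_VERSION;
 *	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	(fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake)
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * After this, plain send()/write() payloads are carried as encrypted
 * TLS application-data records.
 */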
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	/* Save the original callbacks so non-TLS calls can be passed
	 * through and restored on close.
	 */
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Rebuild the IPv6 TLS protos whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	kfree(ctx);
	icsk->icsk_ulp_data = NULL;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			/* Hold a reference across the unlocked callback so
			 * the device can't be released underneath us.
			 */
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}
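/* For TLS_HW_RECORD sockets the registered tls_device handles the TLS
 * records itself; the hash()/unhash() hooks below mirror socket table
 * insertion and removal to every registered device so it can track the
 * connection alongside the stack.
 */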
static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	/* The first index is the TX config, the second the RX config */
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}
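/* ULP attach point, reached via setsockopt(SOL_TCP, TCP_ULP, "tls").
 * A device capable of full record offload claims the socket first;
 * otherwise the socket starts in the TLS_BASE/TLS_BASE configuration
 * and waits for TLS_TX/TLS_RX setsockopt() calls to install keys.
 */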
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	/* Clone the stream ops and override splice_read so spliced
	 * reads go through the TLS SW receive path.
	 */
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);