/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/tls.h>
#include <net/tls_toe.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);
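/* tls_prots is indexed [ip_ver][tx_conf][rx_conf]: one cloned struct proto
 * per combination of address family and TX/RX configuration, so switching
 * a socket's configuration amounts to a single pointer assignment (see
 * update_sk_prot() below).
 */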
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
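/* A minimal userspace sketch of driving the cmsg path above, e.g. to send
 * a TLS alert record instead of application data (error handling omitted;
 * assumes <linux/tls.h> for SOL_TLS and TLS_SET_RECORD_TYPE):
 *
 *	char type = 21;				// TLS record type: alert
 *	char cbuf[CMSG_SPACE(sizeof(type))] = { 0 };
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
 *	*CMSG_DATA(cmsg) = type;
 *	// attach an iovec with the record payload, then sendmsg(fd, &msg, 0)
 */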
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	sg = ctx->partially_sent_record;
	if (!sg)
		return false;

	while (1) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);

		if (sg_is_last(sg))
			break;
		sg++;
	}
	ctx->partially_sent_record = NULL;
	return true;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages, call the lower protocol write space handler
	 * to ensure we wake up any waiting operations there, e.g. if
	 * do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW)
		tls_sw_release_resources_rx(sk);
	else if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	sk->sk_prot = ctx->sk_proto;
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}
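/* Userspace can read the TX crypto state back through the getsockopt
 * handlers below, e.g. (sketch, error handling omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci;
 *	socklen_t len = sizeof(ci);
 *
 *	getsockopt(fd, SOL_TLS, TLS_TX, &ci, &len);
 *	// ci.iv / ci.rec_seq now hold the current TX IV and record sequence
 */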
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_gcm_128 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_128,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_gcm_256 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_256,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
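/* do_tls_setsockopt_conf() below is the heart of kTLS setup. A minimal
 * userspace sketch of the full sequence (sketch only; the key material
 * must come from a completed TLS handshake, e.g. via a TLS library):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	// and/or TLS_RX
 *	// subsequent send()/recv() payloads are en/decrypted by the kernel
 */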
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
			    optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = sk->sk_prot;
	return ctx;
}
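/* tls_build_proto() uses classic double-checked locking: the cheap
 * smp_load_acquire() check skips the mutex on the fast path, and the
 * smp_store_release() publishing saved_tcpv{4,6}_prot pairs with it so
 * that anyone who observes the new pointer also observes the fully
 * built tls_prots table.
 */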
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tls_ctx_free(sk, ctx);
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_toe_device *dev;
	struct tls_context *ctx;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_toe_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->sk_proto->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_toe_device *dev;
	int err;

	err = ctx->sk_proto->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}
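/* build_protos() fills the prot[tx_conf][rx_conf] matrix by cloning the
 * base TCP proto and overriding only the callbacks each configuration
 * needs: TLS_BASE leaves the data paths untouched, TLS_SW routes them
 * through kernel crypto, TLS_HW relies on NIC record en/decryption, and
 * TLS_HW_RECORD hands the whole record layer to a TOE device.
 */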
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

	if (tls_hw_prot(sk))
		return 0;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

static void tls_update(struct sock *sk, struct proto *p)
{
	struct tls_context *ctx;

	ctx = tls_get_ctx(sk);
	if (likely(ctx))
		ctx->sk_proto = p;
	else
		sk->sk_prot = p;
}

static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		0;

	return size;
}
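/* tls_toe_register_device()/tls_toe_unregister_device() below are the
 * hooks a TLS offload engine (TOE) driver uses to plug into the ULP. A
 * hedged sketch of a driver registering itself (all "mydrv_*" names are
 * hypothetical):
 *
 *	static struct tls_toe_device mydrv_toe = {
 *		.name    = "mydrv",
 *		.feature = mydrv_toe_feature,	// nonzero if offload available
 *		.hash    = mydrv_toe_hash,
 *		.unhash  = mydrv_toe_unhash,
 *		.release = mydrv_toe_release,	// kref release callback
 *	};
 *
 *	tls_toe_register_device(&mydrv_toe);	// at probe
 *	tls_toe_unregister_device(&mydrv_toe);	// at remove
 */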
void tls_toe_register_device(struct tls_toe_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_toe_register_device);

void tls_toe_unregister_device(struct tls_toe_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_toe_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

	tls_device_init();
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_device_cleanup();
}

module_init(tls_register);
module_exit(tls_unregister);