1 /* 2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

#include "tls.h"

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

/* Index of the per-address-family proto/proto_ops tables below. */
enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

/* Fill one tls_cipher_desc slot from the TLS_CIPHER_<x>_*_SIZE constants. */
#define CIPHER_DESC(cipher) [cipher - TLS_CIPHER_MIN] = {	\
	.iv = cipher ## _IV_SIZE, \
	.key = cipher ## _KEY_SIZE, \
	.salt = cipher ## _SALT_SIZE, \
	.tag = cipher ## _TAG_SIZE, \
	.rec_seq = cipher ## _REC_SEQ_SIZE, \
}

/* Per-cipher size table, indexed by cipher_type - TLS_CIPHER_MIN. */
const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
	CIPHER_DESC(TLS_CIPHER_AES_GCM_128),
	CIPHER_DESC(TLS_CIPHER_AES_GCM_256),
	CIPHER_DESC(TLS_CIPHER_AES_CCM_128),
	CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305),
	CIPHER_DESC(TLS_CIPHER_SM4_GCM),
	CIPHER_DESC(TLS_CIPHER_SM4_CCM),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256),
};

/* The TLS proto/proto_ops variants are built lazily from the base TCP
 * proto the first time a socket of that family attaches the ULP; the
 * saved_tcpv{4,6}_prot pointers record which base they were built from
 * (see tls_build_proto()).
 */
static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);

/* Switch sk->sk_prot and the socket ops to the variant matching the
 * socket's address family and current tx_conf/rx_conf.
 */
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
	WRITE_ONCE(sk->sk_socket->ops,
		   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

/* Wait until sk->sk_write_pending drops to zero.  Returns 0 on success,
 * -EAGAIN when *timeo is exhausted, or the signal-mapped errno when a
 * signal is pending.
 */
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo,
				  !READ_ONCE(sk->sk_write_pending), &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

/* Transmit the record described by @sg, starting @first_offset bytes into
 * the first entry, via tcp_sendmsg_locked() with MSG_SPLICE_PAGES.  Fully
 * sent pages are released and uncharged; on a partial send the remaining
 * progress is stashed in ctx->partially_sent_{record,offset} and the
 * error from tcp_sendmsg_locked() is returned.
 */
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | flags,
	};
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->splicing_pages = true;
	while (1) {
		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		bvec_set_page(&bvec, p, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		ret = tcp_sendmsg_locked(sk, &msg, size);

		if (ret != size) {
			if (ret > 0) {
				/* short send: retry the remainder of this entry */
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->splicing_pages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->splicing_pages = false;

	return 0;
}

/* Push a still-open record, if any, so that a record-type change never
 * splits an in-progress record.
 */
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

/* Parse SOL_TLS control messages; only TLS_SET_RECORD_TYPE is understood.
 * On success *record_type is set and 0 is returned.
 */
int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			/* flush any record already open under the old type */
			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

/* Resume transmission of a record previously interrupted mid-send. */
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg,
			   offset, flags);
}

/* Drop a partially sent record: release and uncharge every page still
 * referenced by the pending scatterlist.
 */
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

/* sk_write_space replacement installed for TLS_TX sockets. */
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If splicing_pages call lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example
	 * if splicing pages where to call sk_wait_event.
	 */
	if (ctx->splicing_pages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk: socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	/* key material may live in these unions - wipe it explicitly */
	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

/* Tear down the TX/RX state of the active configuration; called from
 * tls_sk_proto_close() with the socket lock held.
 */
static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

/* ->close replacement: undo the ULP setup, restore the base proto
 * callbacks, then chain to the base protocol's close.
 */
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	/* with HW offload the context may still be referenced elsewhere */
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

/* ->poll replacement for sockets with SW RX: readability is based on
 * decrypted records queued by the strparser, not on raw TCP data.
 */
static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
			    struct poll_table_struct *wait)
{
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct sock *sk = sock->sk;
	struct sk_psock *psock;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	mask = tcp_poll(file, sock, wait);

	state = inet_sk_state_load(sk);
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN))
		return mask;

	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	psock = sk_psock_get(sk);

	/* nothing decrypted and nothing queued anywhere: not readable */
	if (skb_queue_empty_lockless(&ctx->rx_list) &&
	    !tls_strp_msg_ready(ctx) &&
	    sk_psock_queue_empty(psock))
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	if (psock)
		sk_psock_put(sk, psock);

	return mask;
}

/* getsockopt(TLS_TX/TLS_RX): copy the crypto parameters of one direction
 * back to user space.
 */
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}
	/* short read: only the generic version/cipher_type header */
	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	/* full read: per-cipher struct with IV and record sequence.  The
	 * salted part of the IV is stored at the start of cctx->iv, hence
	 * the per-cipher SALT_SIZE offset when copying the IV back out.
	 */
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_ccm_128, info);

		if (len != sizeof(*aes_ccm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(aes_ccm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
			container_of(crypto_info,
				     struct tls12_crypto_info_chacha20_poly1305,
				     info);

		if (len != sizeof(*chacha20_poly1305)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(chacha20_poly1305->iv,
		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
		if (copy_to_user(optval, chacha20_poly1305,
				 sizeof(*chacha20_poly1305)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
			container_of(crypto_info,
				     struct tls12_crypto_info_sm4_gcm, info);

		if (len != sizeof(*sm4_gcm_info)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(sm4_gcm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
		       TLS_CIPHER_SM4_GCM_IV_SIZE);
		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
			container_of(crypto_info,
				     struct tls12_crypto_info_sm4_ccm, info);

		if (len != sizeof(*sm4_ccm_info)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(sm4_ccm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
		       TLS_CIPHER_SM4_CCM_IV_SIZE);
		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *
		  crypto_info_aria_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aria_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_128,
				 sizeof(*crypto_info_aria_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *
		  crypto_info_aria_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aria_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_256,
				 sizeof(*crypto_info_aria_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

/* getsockopt(TLS_TX_ZEROCOPY_RO): report the zerocopy_sendfile flag. */
static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
				   int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len != sizeof(value))
		return -EINVAL;

	value = ctx->zerocopy_sendfile;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

/* getsockopt(TLS_RX_EXPECT_NO_PAD): report the TLS 1.3 no-pad hint. */
static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
				    int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	int value, len;

	if (ctx->prot_info.version != TLS_1_3_VERSION)
		return -EINVAL;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < sizeof(value))
		return -EINVAL;

	value = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf ==
	    TLS_HW)
		value = ctx->rx_no_pad;
	if (value < 0)
		return value;

	if (put_user(sizeof(value), optlen))
		return -EFAULT;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

/* Dispatch SOL_TLS getsockopt under the socket lock. */
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	lock_sock(sk);

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	case TLS_TX_ZEROCOPY_RO:
		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);

	return rc;
}

/* ->getsockopt replacement: handle SOL_TLS here, forward everything else
 * to the base protocol.
 */
static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

/* setsockopt(TLS_TX/TLS_RX): install the crypto state for one direction,
 * trying device offload first and falling back to the SW implementation.
 * Called with the socket lock held.
 */
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
		return -EINVAL;

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support set crypto info more than one time */
	if (TLS_CRYPTO_INFO_READY(crypto_info))
		return -EBUSY;

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256: {
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	}
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_CHACHA20_POLY1305:
		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	case TLS_CIPHER_SM4_GCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
		break;
	case TLS_CIPHER_SM4_CCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
		break;
	case TLS_CIPHER_ARIA_GCM_128:
		/* ARIA-GCM is defined for TLS 1.2 only */
		if (crypto_info->version != TLS_1_2_VERSION) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
		break;
	case TLS_CIPHER_ARIA_GCM_256:
		if (crypto_info->version != TLS_1_2_VERSION) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* copy the cipher-specific tail that follows the generic header */
	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			/* device offload unavailable: fall back to SW TX */
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			/* device offload unavailable: fall back to SW RX */
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);

		tls_strp_check_rcv(&rx_ctx->strp);
	}
	return 0;

err_crypto_info:
	/* don't leave partially copied key material behind */
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
	return rc;
}

/* setsockopt(TLS_TX_ZEROCOPY_RO): toggle zerocopy_sendfile. */
static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
				   unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;

	if (sockptr_is_null(optval) || optlen != sizeof(value))
		return -EINVAL;

	if (copy_from_sockptr(&value, optval, sizeof(value)))
		return -EFAULT;

	if (value > 1)
		return -EINVAL;

	ctx->zerocopy_sendfile = value;

	return 0;
}

/* setsockopt(TLS_RX_EXPECT_NO_PAD): toggle the TLS 1.3 no-pad hint. */
static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
				    unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	u32 val;
	int rc;
	if (ctx->prot_info.version != TLS_1_3_VERSION ||
	    sockptr_is_null(optval) || optlen < sizeof(val))
		return -EINVAL;

	rc = copy_from_sockptr(&val, optval, sizeof(val));
	if (rc)
		return -EFAULT;
	if (val > 1)
		return -EINVAL;
	/* any bytes past the u32 must be zero */
	rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
	if (rc < 1)
		return rc == 0 ? -EINVAL : rc;

	lock_sock(sk);
	rc = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
		ctx->rx_no_pad = val;
		tls_update_rx_zc_capable(ctx);
		rc = 0;
	}
	release_sock(sk);

	return rc;
}

/* Dispatch SOL_TLS setsockopt; locking is per-option as needed. */
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	case TLS_TX_ZEROCOPY_RO:
		lock_sock(sk);
		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
		release_sock(sk);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

/* ->setsockopt replacement: handle SOL_TLS here, forward everything else
 * to the base protocol.
 */
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

/* Allocate a tls_context and attach it as ULP data.  GFP_ATOMIC because
 * callers may hold sk_callback_lock with BHs disabled (see tls_init()).
 */
struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);
	ctx->sk = sk;
	return ctx;
}

/* Derive the proto_ops variant for every TX/RX config from @base. */
static void
build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
		const struct proto_ops *base)
{
	ops[TLS_BASE][TLS_BASE] = *base;

	ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_SW  ][TLS_BASE].splice_eof	= tls_sw_splice_eof;

	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_BASE][TLS_SW  ].splice_read	= tls_sw_splice_read;
	ops[TLS_BASE][TLS_SW  ].poll		= tls_sk_poll;
	ops[TLS_BASE][TLS_SW  ].read_sock	= tls_sw_read_sock;

	ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
	ops[TLS_SW  ][TLS_SW  ].splice_read	= tls_sw_splice_read;
	ops[TLS_SW  ][TLS_SW  ].poll		= tls_sk_poll;
	ops[TLS_SW  ][TLS_SW  ].read_sock	= tls_sw_read_sock;

#ifdef CONFIG_TLS_DEVICE
	/* HW offload produces/consumes cleartext on the socket, so the HW
	 * variants reuse the BASE/SW ops of the matching direction.
	 */
	ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];

	ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];

	ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
#endif
#ifdef CONFIG_TLS_TOE
	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}

/* Build (once per base proto pointer) the TLS proto and proto_ops tables
 * for this socket's address family.
 */
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			build_proto_ops(tls_proto_ops[TLSV6],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			build_proto_ops(tls_proto_ops[TLSV4],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

/* Derive the proto variant for every TX/RX config from @base. */
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].splice_eof	= tls_sw_splice_eof;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].sock_is_readable	= tls_sw_sock_is_readable;
	prot[TLS_BASE][TLS_SW].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].sock_is_readable	= tls_sw_sock_is_readable;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].splice_eof	= tls_device_splice_eof;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].splice_eof		= tls_device_splice_eof;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_toe_unhash;
#endif
}

/* ULP ->init: attach a TLS context to an established TCP socket and
 * switch it to the TLS_BASE/TLS_BASE proto variant.
 */
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather then
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

/* ULP ->update: record the new base proto/write_space when the stack
 * replaces sk->sk_prot underneath the ULP.
 */
static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	WARN_ON_ONCE(sk->sk_prot == p);

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}

/* Map the internal TLS_* config value to the uAPI TLS_CONF_* value. */
static u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

/* ULP ->get_info: emit the INET_ULP_INFO_TLS attribute nest for
 * inet_diag consumers.
 */
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		/* no context (yet): emit an empty nest, not an error */
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
		err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
		if (err)
			goto nla_failure;
	}
	if (ctx->rx_no_pad) {
		err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
		if (err)
			goto nla_failure;
	}

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

/* ULP ->get_info_size: worst-case size of the nest built above. */
static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
		nla_total_size(0) +		/* TLS_INFO_RX_NO_PAD */
		0;

	return size;
}

/* Per-netns setup: MIB counters and procfs entries. */
static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	err = tls_strp_dev_init();
	if (err)
		goto err_pernet;

	err = tls_device_init();
	if (err)
		goto err_strp;

	/* register the ULP last: "tls" becomes reachable immediately */
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
err_strp:
	tls_strp_dev_exit();
err_pernet:
	unregister_pernet_subsys(&tls_proc_ops);
	return err;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_strp_dev_exit();
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);