/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

#include "tls.h"

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

/* Index of the per-address-family proto/proto_ops tables below. */
enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

/* Compile-time sanity checks tying each TLS_CIPHER_* constant set to the
 * layout of its struct tls12_crypto_info_* counterpart.
 */
#define CHECK_CIPHER_DESC(cipher,ci)				\
	static_assert(cipher ## _IV_SIZE <= MAX_IV_SIZE);		\
	static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE);	\
	static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE);		\
	static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE);	\
	static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE);		\
	static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE);		\
	static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);

/* Common offset/size fields of a cipher descriptor, derived from the
 * userspace-visible crypto_info struct layout.
 */
#define __CIPHER_DESC(ci)				\
	.iv_offset = offsetof(struct ci, iv),		\
	.key_offset = offsetof(struct ci, key),		\
	.salt_offset = offsetof(struct ci, salt),	\
	.rec_seq_offset = offsetof(struct ci, rec_seq),	\
	.crypto_info = sizeof(struct ci)

#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = {	\
	.nonce = cipher ## _IV_SIZE,		\
	.iv = cipher ## _IV_SIZE,		\
	.key = cipher ## _KEY_SIZE,		\
	.salt = cipher ## _SALT_SIZE,		\
	.tag = cipher ## _TAG_SIZE,		\
	.rec_seq = cipher ## _REC_SEQ_SIZE,	\
	.cipher_name = algname,			\
	.offloadable = _offloadable,		\
	__CIPHER_DESC(ci),			\
}

/* Same as CIPHER_DESC but for ciphers with no explicit per-record nonce
 * (e.g. ChaCha20-Poly1305).
 */
#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = {	\
	.nonce = 0,				\
	.iv = cipher ## _IV_SIZE,		\
	.key = cipher ## _KEY_SIZE,		\
	.salt = cipher ## _SALT_SIZE,		\
	.tag = cipher ## _TAG_SIZE,		\
	.rec_seq = cipher ## _REC_SEQ_SIZE,	\
	.cipher_name = algname,			\
	.offloadable = _offloadable,		\
	__CIPHER_DESC(ci),			\
}

/* Descriptor table for every cipher the TLS ULP supports, indexed by
 * TLS_CIPHER_* - TLS_CIPHER_MIN.
 */
const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
	CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
	CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};

CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);

/* Cached base protos so the TLS variants are rebuilt only when the
 * underlying tcp proto pointer changes (e.g. module reload of ipv6).
 */
static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);

/* Install the proto and proto_ops variants matching the socket's address
 * family and current tx/rx configuration.
 */
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
	WRITE_ONCE(sk->sk_socket->ops,
		   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

/* Wait until no write is pending on @sk or @timeo expires/signal arrives.
 * Returns 0 on success, -EAGAIN on timeout, or a signal errno.
 */
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo,
				  !READ_ONCE(sk->sk_write_pending), &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

/* Push the pages of scatterlist @sg to TCP, starting @first_offset bytes
 * into the first entry.  On a partial send the remaining sg/offset are
 * stashed in @ctx as the partially sent record and the (<=0) tcp_sendmsg
 * result is returned; fully sent pages are released and uncharged.
 * Caller must hold the socket lock (uses tcp_sendmsg_locked()).
 */
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | flags,
	};
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->splicing_pages = true;
	while (1) {
		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		bvec_set_page(&bvec, p, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		ret = tcp_sendmsg_locked(sk, &msg, size);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->splicing_pages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->splicing_pages = false;

	return 0;
}

/* Flush a pending open (not yet closed) record, if any, via the
 * config-specific push_pending_record callback.
 */
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

/* Parse SOL_TLS control messages on a sendmsg call.  Currently only
 * TLS_SET_RECORD_TYPE is understood: it closes any open record and
 * returns the requested record type in *record_type.  Returns 0 if a
 * record type was set, -EINVAL for malformed/unknown cmsgs (including
 * TLS_SET_RECORD_TYPE combined with MSG_MORE), or the error from
 * flushing the open record.  Note rc starts at -EINVAL, so a message
 * with no SOL_TLS cmsg also yields -EINVAL.
 */
int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

/* Resume transmission of the record a previous tls_push_sg() left
 * partially sent.  The stashed pointer is cleared first; tls_push_sg()
 * re-stashes it if the retry is again partial.
 */
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

/* Drop a partially sent record without transmitting it, releasing and
 * uncharging every remaining page.
 */
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

/* sk_write_space replacement installed for TX offload; lets SW/HW TX
 * paths react to newly available write space before chaining to the
 * original handler.
 */
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If splicing_pages call lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example
	 * if splicing pages where to call sk_wait_event.
	 */
	if (ctx->splicing_pages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	/* Key material lives in these unions; wipe before freeing. */
	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

/* Tear down per-direction TX/RX resources (SW or device offload) and
 * update the corresponding SNMP gauges.  Flushes any open record first,
 * waiting for pending writers up to @timeo.
 */
static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

/* close() handler installed in the TLS protos: unwinds the ULP state,
 * restores the original proto and write_space callback, then delegates
 * to the base proto's close.  The ctx is only freed here when neither
 * direction used device offload (otherwise the device code owns it).
 */
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

/* poll() for sockets with SW RX: start from tcp_poll()'s mask, then clear
 * EPOLLIN/EPOLLRDNORM unless a decrypted record, a strparser message, or
 * a psock-queued skb is actually ready.
 */
static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
			    struct poll_table_struct *wait)
{
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct sock *sk = sock->sk;
	struct sk_psock *psock;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	mask = tcp_poll(file, sock, wait);

	state = inet_sk_state_load(sk);
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN))
		return mask;

	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	psock = sk_psock_get(sk);

	if (skb_queue_empty_lockless(&ctx->rx_list) &&
	    !tls_strp_msg_ready(ctx) &&
	    sk_psock_queue_empty(psock))
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	if (psock)
		sk_psock_put(sk, psock);

	return mask;
}

/* getsockopt(TLS_TX/TLS_RX): copy the direction's crypto_info back to
 * userspace.  With len == sizeof(struct tls_crypto_info) only the header
 * (version/cipher) is returned; with the full per-cipher size the live
 * IV and record sequence are copied out of the cipher context first.
 */
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
			container_of(crypto_info,
				struct tls12_crypto_info_aes_ccm_128, info);

		if (len != sizeof(*aes_ccm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(aes_ccm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
			container_of(crypto_info,
				struct tls12_crypto_info_chacha20_poly1305,
				info);

		if (len != sizeof(*chacha20_poly1305)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(chacha20_poly1305->iv,
		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
		if (copy_to_user(optval, chacha20_poly1305,
				sizeof(*chacha20_poly1305)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
			container_of(crypto_info,
				struct tls12_crypto_info_sm4_gcm, info);

		if (len != sizeof(*sm4_gcm_info)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(sm4_gcm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
		       TLS_CIPHER_SM4_GCM_IV_SIZE);
		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
			container_of(crypto_info,
				struct tls12_crypto_info_sm4_ccm, info);

		if (len != sizeof(*sm4_ccm_info)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(sm4_ccm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
		       TLS_CIPHER_SM4_CCM_IV_SIZE);
		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *
		  crypto_info_aria_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aria_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_128,
				 sizeof(*crypto_info_aria_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *
		  crypto_info_aria_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aria_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_256,
				 sizeof(*crypto_info_aria_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

/* getsockopt(TLS_TX_ZEROCOPY_RO): report the zerocopy_sendfile flag. */
static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
				   int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len != sizeof(value))
		return -EINVAL;

	value = ctx->zerocopy_sendfile;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

/* getsockopt(TLS_RX_EXPECT_NO_PAD): report the RX no-padding hint.
 * Only meaningful for TLS 1.3 with an RX direction configured.
 */
static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
				    int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	int value, len;

	if (ctx->prot_info.version != TLS_1_3_VERSION)
		return -EINVAL;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < sizeof(value))
		return -EINVAL;

	value = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		value = ctx->rx_no_pad;
	if (value < 0)
		return value;

	if (put_user(sizeof(value), optlen))
		return -EFAULT;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

/* Dispatch SOL_TLS getsockopt options under the socket lock. */
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	lock_sock(sk);

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	case TLS_TX_ZEROCOPY_RO:
		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);

	return rc;
}

/* getsockopt entry point: non-SOL_TLS levels fall through to the base
 * proto's handler.
 */
static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

/* setsockopt(TLS_TX/TLS_RX): validate and install the crypto state for
 * one direction, preferring device offload and falling back to the SW
 * implementation.  On any error the just-copied key material is wiped.
 * Must be called with the socket lock held.
 */
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
		return -EINVAL;

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support set crypto info more than one time */
	if (TLS_CRYPTO_INFO_READY(crypto_info))
		return -EBUSY;

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256: {
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	}
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_CHACHA20_POLY1305:
		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	case TLS_CIPHER_SM4_GCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
		break;
	case TLS_CIPHER_SM4_CCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
		break;
	case TLS_CIPHER_ARIA_GCM_128:
		if (crypto_info->version != TLS_1_2_VERSION) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
		break;
	case TLS_CIPHER_ARIA_GCM_256:
		if (crypto_info->version != TLS_1_2_VERSION) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Copy the cipher-specific tail (key/iv/salt/rec_seq) that follows
	 * the generic header.
	 */
	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);

		tls_strp_check_rcv(&rx_ctx->strp);
	}
	return 0;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
	return rc;
}

/* setsockopt(TLS_TX_ZEROCOPY_RO): set the zerocopy_sendfile flag (0/1). */
static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
				   unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;

	if (sockptr_is_null(optval) || optlen != sizeof(value))
		return -EINVAL;

	if (copy_from_sockptr(&value, optval, sizeof(value)))
		return -EFAULT;

	if (value > 1)
		return -EINVAL;

	ctx->zerocopy_sendfile = value;

	return 0;
}

/* setsockopt(TLS_RX_EXPECT_NO_PAD): set the TLS 1.3 RX no-padding hint.
 * Any bytes beyond the u32 value must be zero (forward compat check).
 */
static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
				    unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	u32 val;
	int rc;

	if (ctx->prot_info.version != TLS_1_3_VERSION ||
	    sockptr_is_null(optval) || optlen < sizeof(val))
		return -EINVAL;

	rc = copy_from_sockptr(&val, optval, sizeof(val));
	if (rc)
		return -EFAULT;
	if (val > 1)
		return -EINVAL;
	rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
	if (rc < 1)
		return rc == 0 ? -EINVAL : rc;

	lock_sock(sk);
	rc = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
		ctx->rx_no_pad = val;
		tls_update_rx_zc_capable(ctx);
		rc = 0;
	}
	release_sock(sk);

	return rc;
}

/* Dispatch SOL_TLS setsockopt options; note TLS_RX_EXPECT_NO_PAD takes
 * the socket lock itself (after its sockptr checks), the others here.
 */
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	case TLS_TX_ZEROCOPY_RO:
		lock_sock(sk);
		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
		release_sock(sk);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

/* setsockopt entry point: non-SOL_TLS levels fall through to the base
 * proto's handler.
 */
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

/* Allocate and attach a fresh TLS ULP context to @sk, remembering the
 * socket's original proto.  GFP_ATOMIC because callers may hold
 * sk_callback_lock (see tls_init()).
 */
struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);
	ctx->sk = sk;
	return ctx;
}

/* Populate the [tx_conf][rx_conf] proto_ops matrix from @base, overriding
 * only the ops each configuration needs.
 */
static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			    const struct proto_ops *base)
{
	ops[TLS_BASE][TLS_BASE] = *base;

	ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_SW  ][TLS_BASE].splice_eof	= tls_sw_splice_eof;

	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_BASE][TLS_SW  ].splice_read	= tls_sw_splice_read;
	ops[TLS_BASE][TLS_SW  ].poll		= tls_sk_poll;
	ops[TLS_BASE][TLS_SW  ].read_sock	= tls_sw_read_sock;

	ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
	ops[TLS_SW  ][TLS_SW  ].splice_read	= tls_sw_splice_read;
	ops[TLS_SW  ][TLS_SW  ].poll		= tls_sk_poll;
	ops[TLS_SW  ][TLS_SW  ].read_sock	= tls_sw_read_sock;

#ifdef CONFIG_TLS_DEVICE
	ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];

	ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];

	ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
#endif
#ifdef CONFIG_TLS_TOE
	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}

/* (Re)build the TLS proto/proto_ops tables for @sk's address family if
 * the base tcp proto pointer changed since the last build.
 */
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			build_proto_ops(tls_proto_ops[TLSV6],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			build_proto_ops(tls_proto_ops[TLSV4],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

/* Populate the [tx_conf][rx_conf] proto matrix from @base, mirroring
 * build_proto_ops() for struct proto callbacks.
 */
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].splice_eof	= tls_sw_splice_eof;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		  = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].sock_is_readable   = tls_sw_sock_is_readable;
	prot[TLS_BASE][TLS_SW].close		  = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].sock_is_readable   = tls_sw_sock_is_readable;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].splice_eof	= tls_device_splice_eof;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].splice_eof		= tls_device_splice_eof;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_toe_unhash;
#endif
}

/* ULP init callback: attach a TLS context in TLS_BASE/TLS_BASE config.
 * Only ESTABLISHED TCP sockets are supported (see comment below).
 */
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather then
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

/* ULP update callback: record the new base proto/write_space, or install
 * them directly when no ctx exists (e.g. during sk_clone_lock()).
 */
static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	WARN_ON_ONCE(sk->sk_prot == p);

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}

/* Map the internal tx/rx config to the TLS_CONF_* values exported via
 * inet_diag.
 */
static u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

/* ULP get_info callback: emit a nested INET_ULP_INFO_TLS attribute with
 * version, cipher, per-direction config, and feature flags.
 */
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
		err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
		if (err)
			goto nla_failure;
	}
	if (ctx->rx_no_pad) {
		err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
		if (err)
			goto nla_failure;
	}

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

/* Worst-case netlink size of the attributes tls_get_info() may emit. */
static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
		nla_total_size(0) +		/* TLS_INFO_RX_NO_PAD */
		0;

	return size;
}

/* Per-netns init: allocate MIB counters and register /proc entries. */
static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

/* Per-netns exit: tear down /proc entries and free MIB counters. */
static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

/* Module init: pernet subsys, strparser device notifier, device offload,
 * then the "tls" ULP itself, unwinding in reverse order on failure.
 */
static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	err = tls_strp_dev_init();
	if (err)
		goto err_pernet;

	err = tls_device_init();
	if (err)
		goto err_strp;

	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
err_strp:
	tls_strp_dev_exit();
err_pernet:
	unregister_pernet_subsys(&tls_proc_ops);
	return err;
}

/* Module exit: unwind tls_register() in reverse order. */
static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_strp_dev_exit();
tls_device_cleanup(); 1318 unregister_pernet_subsys(&tls_proc_ops); 1319 } 1320 1321 module_init(tls_register); 1322 module_exit(tls_unregister); 1323