/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/tls.h>

/* Release pages and socket memory accounting for the tail of an sg list so
 * that it describes exactly target_size bytes.
 */
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	/* The encrypted list must also keep room for the record header,
	 * explicit nonce and authentication tag.
	 */
	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	/* Skip over the record header while encrypting; restored below. */
	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	kfree(aead_req);
	return rc;
}
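/* A sketch of the scatterlist geometry tls_do_encryption() relies on (the
 * chaining itself is set up in tls_set_sw_offload() at the bottom of this
 * file):
 *
 *	sg_aead_in:  [ aad_space ] --chained--> sg_plaintext_data
 *	sg_aead_out: [ aad_space ] --chained--> sg_encrypted_data
 *
 * Both sides lead with the same TLS_AAD_SPACE_SIZE buffer filled in by
 * tls_make_aad(), so the AEAD reads AAD || plaintext and writes the
 * ciphertext and tag into the encrypted pages, past the room that the
 * offset/length adjustment above reserves for the record header.
 */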
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and we fail, we need
		 * to set SOCK_NOSPACE to trigger another write_space in
		 * the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

/* Pin the pages of a user iovec directly into the destination scatterlist,
 * avoiding a copy.  Fails with -EFAULT once to_max_pages entries are used.
 */
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* Copy from the iovec into the already-allocated plaintext fragments,
 * starting at the first open (not yet filled) fragment.
 */
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
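/* Wire layout of one record as assembled by tls_push_record() above, for
 * the AES-GCM-128 cipher configured in tls_set_sw_offload() below:
 *
 *	+---------------+-------------------+------------+--------------+
 *	| 5B TLS header | 8B explicit nonce | ciphertext | 16B auth tag |
 *	+---------------+-------------------+------------+--------------+
 *	|<------- tx.prepend_size --------->|            |<-tx.tag_size>|
 *
 * tx.overhead_size = tx.prepend_size + tx.tag_size is what the send paths
 * below reserve in sg_encrypted_data on top of each record's payload.
 */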
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that
			 * was actually allocated. The difference is due to
			 * the max sg elements limit.
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			/* Try the zero-copy path first: pin the user pages
			 * straight into the plaintext list.
			 */
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that
			 * was actually allocated. The difference is due to
			 * the max sg elements limit.
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
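/* Illustrative userspace sketch (not part of this file): a non-default
 * record type reaches tls_proccess_cmsg() above via a TLS_SET_RECORD_TYPE
 * control message, e.g. to send a TLS alert record:
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	unsigned char record_type = 21;	// 21 == TLS alert content type
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
 *	*CMSG_DATA(cmsg) = record_type;
 *	// point msg.msg_iov at the alert body, then sendmsg(fd, &msg, 0)
 */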
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due to
			 * the max sg elements limit.
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
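/* Illustrative userspace sketch (not part of this file): sendfile(2) on a
 * socket with kernel TLS enabled ends up in tls_sw_sendpage() above, e.g.:
 *
 *	off_t off = 0;
 *	sendfile(tls_fd, file_fd, &off, count);	// pages sent by reference
 *
 * The page is taken with get_page() rather than copied; its contents are
 * only read when the record is encrypted into sg_encrypted_data.
 */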
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}
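/* Illustrative userspace sketch (not part of this file): the setsockopt()
 * sequence that leads into tls_set_sw_offload() below, with key material
 * laid out as struct tls12_crypto_info_aes_gcm_128 from <linux/tls.h>:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *	ci.info.version = TLS_1_2_VERSION;
 *	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	// fill ci.key, ci.salt, ci.iv, ci.rec_seq from the handshake
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */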
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	crypto_init_wait(&sw_ctx->async_wait);

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tx.tag_size = tag_size;
	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
	ctx->tx.iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(ctx->tx.iv, gcm_128_info->salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->tx.rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	/* Chain the AAD buffer in front of the plaintext and encrypted
	 * lists, so the AEAD sees AAD || payload on each side.
	 */
	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tx.tag_size);
	if (!rc)
		return 0;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->tx.rec_seq);
	ctx->tx.rec_seq = NULL;
free_iv:
	kfree(ctx->tx.iv);
	ctx->tx.iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}