/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16-byte IV is made of four fields of given sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
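
/* Illustrative sketch (editor's example, not part of the kernel API): how
 * the CCM IV described above could be assembled from the 4-byte implicit
 * nonce (salt) and the 8-byte explicit nonce (record sequence number).
 * Assumes memcpy is visible via the includes above. The trailing 3-byte
 * length field is written by the CCM transform itself, so it is left
 * untouched here.
 */
static inline void tls_example_build_ccm_iv(u8 iv[MAX_IV_SIZE],
					    const u8 *salt,
					    const u8 *rec_seq)
{
	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	/* b0: (length width 3) - 1 */
	memcpy(iv + 1, salt, 4);	/* implicit nonce */
	memcpy(iv + 5, rec_seq, TLS_MAX_REC_SEQ_SIZE); /* explicit nonce */
}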

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
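
/* Illustrative sketch (editor's example; the struct name and fields are
 * hypothetical): a driver overlays its own per-connection TX state on
 * driver_state[] above, retrieved at runtime via tls_driver_ctx() further
 * down, and must keep that state within TLS_DRIVER_STATE_SIZE_TX bytes.
 */
struct tls_example_drv_tx_state {	/* hypothetical driver layout */
	u32 stream_id;
	u32 expected_tcp_seq;
};
static_assert(sizeof(struct tls_example_drv_tx_state) <=
	      TLS_DRIVER_STATE_SIZE_TX);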

enum tls_context_flags {
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
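
/* Worked example (editor's illustration): for TLS 1.2 with AES-GCM-128
 * these fields hold:
 *
 *	prepend_size  = TLS_HEADER_SIZE (5) + explicit IV (8) = 13
 *	tag_size      = 16
 *	overhead_size = prepend_size + tag_size = 29
 *	iv_size       = 8, salt_size = 4, rec_seq_size = 8
 *	aad_size      = 13 (seq 8 + type 1 + version 2 + length 2)
 *	tail_size     = 0 (TLS 1.3 instead appends a 1-byte content type)
 */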

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock;	/* protects partially_sent_* fields and
				 * per-type TX fields
				 */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
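
/* Illustrative sketch (editor's example, not a kernel API): how a driver's
 * TX path might use the helpers above to find the record covering a
 * retransmitted TCP sequence number. Taking ->lock around the lookup is
 * the caller's responsibility and is elided here.
 */
static inline struct tls_record_info *
tls_example_find_record(struct tls_offload_context_tx *ctx, u32 tcp_seq,
			u64 *rcd_sn)
{
	struct tls_record_info *rec = tls_get_record(ctx, tcp_seq, rcd_sn);

	/* the zero-length start marker precedes the first real record and
	 * carries no payload to re-encrypt
	 */
	if (rec && tls_record_is_start_marker(rec))
		return NULL;
	return rec;
}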

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}
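
/* Worked example (editor's illustration): tls_bigint_increment() above
 * propagates the carry from the least significant (last) byte. For a
 * 2-byte counter:
 *
 *	00 fe -> 00 ff	returns false (no wrap)
 *	00 ff -> 01 00	returns false
 *	ff ff -> 00 00	returns true  (wrap: record sequence exhausted)
 *
 * A true return is used below in tls_advance_record_sn() to abort the
 * connection with EBADMSG rather than risk reusing a nonce.
 */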

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS 1.2 and TLS 1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif

#define RESYNC_REQ		BIT(0)
#define RESYNC_REQ_ASYNC	BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
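
/* Worked example (editor's illustration): a resync request for TCP
 * sequence 0x11223344 stores ((u64)0x11223344 << 32) | RESYNC_REQ, i.e.
 * 0x1122334400000001, in ->resync_req, so the core can fetch both the
 * target sequence and the pending bit with a single atomic64 read.
 */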
675 { 676 struct tls_context *tls_ctx = tls_get_ctx(sk); 677 678 tls_offload_ctx_rx(tls_ctx)->resync_type = type; 679 } 680 681 /* Driver's seq tracking has to be disabled until resync succeeded */ 682 static inline bool tls_offload_tx_resync_pending(struct sock *sk) 683 { 684 struct tls_context *tls_ctx = tls_get_ctx(sk); 685 bool ret; 686 687 ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags); 688 smp_mb__after_atomic(); 689 return ret; 690 } 691 692 int __net_init tls_proc_init(struct net *net); 693 void __net_exit tls_proc_fini(struct net *net); 694 695 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, 696 unsigned char *record_type); 697 int decrypt_skb(struct sock *sk, struct sk_buff *skb, 698 struct scatterlist *sgout); 699 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb); 700 701 int tls_sw_fallback_init(struct sock *sk, 702 struct tls_offload_context_tx *offload_ctx, 703 struct tls_crypto_info *crypto_info); 704 705 #ifdef CONFIG_TLS_DEVICE 706 void tls_device_init(void); 707 void tls_device_cleanup(void); 708 void tls_device_sk_destruct(struct sock *sk); 709 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); 710 void tls_device_free_resources_tx(struct sock *sk); 711 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); 712 void tls_device_offload_cleanup_rx(struct sock *sk); 713 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq); 714 void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq); 715 int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, 716 struct sk_buff *skb, struct strp_msg *rxm); 717 718 static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) 719 { 720 if (!sk_fullsock(sk) || 721 smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct) 722 return false; 723 return tls_get_ctx(sk)->rx_conf == TLS_HW; 724 } 725 #else 726 static inline void tls_device_init(void) {} 727 static inline void tls_device_cleanup(void) {} 728 729 static inline int 730 tls_set_device_offload(struct sock *sk, struct tls_context *ctx) 731 { 732 return -EOPNOTSUPP; 733 } 734 735 static inline void tls_device_free_resources_tx(struct sock *sk) {} 736 737 static inline int 738 tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) 739 { 740 return -EOPNOTSUPP; 741 } 742 743 static inline void tls_device_offload_cleanup_rx(struct sock *sk) {} 744 static inline void 745 tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {} 746 747 static inline int 748 tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, 749 struct sk_buff *skb, struct strp_msg *rxm) 750 { 751 return 0; 752 } 753 #endif 754 #endif /* _TLS_OFFLOAD_H */ 755