/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16-byte IV is made of four fields of given sizes.
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2
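
/* Illustrative sketch, not part of the upstream API: one plausible way to
 * assemble the CCM IV described above from the 4-byte salt and the 8-byte
 * record sequence number. The helper name is hypothetical; the trailing
 * three length bytes are filled in by the CCM code itself, and memcpy()
 * is assumed visible via the includes above.
 */
static inline void tls_example_build_ccm_iv(u8 *iv, const u8 *salt,
					    const u8 *rec_seq)
{
	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	/* (length width - 1) = 2 */
	memcpy(iv + 1, salt, 4);	/* implicit nonce */
	memcpy(iv + 5, rec_seq, 8);	/* explicit nonce; iv[13..15] = length */
}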

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records
 * are stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 async_capable:1;
	u8 zc_capable:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
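
/* Illustrative sketch with a hypothetical struct and helper: a driver can
 * carve its per-connection TX state out of driver_state[] as long as it
 * fits in TLS_DRIVER_STATE_SIZE_TX bytes, which the (assumed visible)
 * BUILD_BUG_ON() enforces at compile time.
 */
struct tls_example_drv_tx_state {
	u32 hw_flow_id;
	u32 flags;
};

static inline struct tls_example_drv_tx_state *
tls_example_get_drv_tx_state(struct tls_offload_context_tx *ctx)
{
	BUILD_BUG_ON(sizeof(struct tls_example_drv_tx_state) >
		     TLS_DRIVER_STATE_SIZE_TX);
	return (struct tls_example_drv_tx_state *)ctx->driver_state;
}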

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, on TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
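
/* Illustrative sketch of a hypothetical tls_dev_add() implementation: a
 * real driver validates crypto_info, programs the key material and
 * start_offload_tcp_sn into hardware, and returns 0 only on success;
 * anything it cannot offload is refused with -EOPNOTSUPP so the stack
 * falls back to software crypto.
 */
static inline int tls_example_dev_add(struct net_device *netdev,
				      struct sock *sk,
				      enum tls_offload_ctx_dir direction,
				      struct tls_crypto_info *crypto_info,
				      u32 start_offload_tcp_sn)
{
	if (crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)
		return -EOPNOTSUPP;

	/* ... set up flow steering and keys in hardware here ... */
	return 0;
}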

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;

	return &scb->tls;
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() on an empty list yields a bogus pointer, not
	 * NULL; use the _or_null variant so the empty case is safe.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
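
/* Illustrative sketch with a hypothetical helper: how a driver's transmit
 * path might look up the TLS record covering a (possibly retransmitted)
 * TCP segment. tls_get_record() expects the records list lock to be held.
 */
static inline struct tls_record_info *
tls_example_find_record(struct tls_offload_context_tx *offload_ctx,
			u32 tcp_seq, u64 *p_rcd_sn)
{
	struct tls_record_info *record;
	unsigned long flags;

	spin_lock_irqsave(&offload_ctx->lock, flags);
	record = tls_get_record(offload_ctx, tcp_seq, p_rcd_sn);
	spin_unlock_irqrestore(&offload_ctx->lock, flags);

	return record;
}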

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	/* true if the big-endian counter wrapped around to all zeroes */
	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		/* we can use the IV as the explicit nonce per the spec */
		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* We write the explicit nonce here as well, so buf must have room
	 * for TLS_HEADER_SIZE + iv_size bytes.
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS 1.2 and TLS 1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				unsigned char record_type,
				struct tls_prot_info *prot)
{
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
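
/* Worked example: for TLS 1.2 AES-GCM-128 the 13-byte AAD built above is
 *
 *   rec_seq[8] || content type[1] || version[2] || plaintext length[2]
 *
 * while for TLS 1.3 the AAD is just the 5-byte record header, with 'size'
 * grown by tag_size so the length field covers ciphertext plus tag.
 */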

static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
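
/* Illustrative sketch of a hypothetical call site: hardware that cannot
 * parse TLS record headers on its own would opt into the core-driven
 * next-record-hint resync scheme when RX offload is installed, e.g. from
 * its tls_dev_add() callback.
 */
static inline void tls_example_choose_resync(struct sock *sk)
{
	tls_offload_rx_resync_set_type(sk,
				       TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
}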

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */