/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16 bytes of IV are made of four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field is encoded in 'b0' as (length width - 1), hence
 * b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};
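/* Illustrative sketch, not a kernel API: assembling an AES-CCM IV per the
 * layout described above. The 4-byte salt acts as the implicit nonce and the
 * 8-byte explicit nonce follows it; the trailing 3-byte length field is
 * filled in by the AEAD implementation, not by us.
 */
static inline void tls_example_ccm_iv(u8 *iv, const u8 *salt,
				      const u8 *explicit_nonce)
{
	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	/* b0: encodes the length width */
	memcpy(iv + 1, salt, 4);	/* implicit nonce */
	memcpy(iv + 5, explicit_nonce, 8);	/* explicit nonce */
	/* iv[13..15]: message length, written by the crypto layer */
}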
/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;
	int inplace_crypto;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	int async_notify;
	int async_capable;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	int async_capable;
	bool decrypted;
	atomic_t decrypt_pending;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
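/* Illustrative sketch, not a kernel API: a driver keeping private
 * per-connection TX state in driver_state[] would typically cast the flexible
 * array to its own structure and guard the size at compile time.
 * 'struct tls_example_drv_state' is a made-up driver structure.
 */
struct tls_example_drv_state {
	u32 expected_tcp_seq;
	u32 hw_flow_id;
};

static inline struct tls_example_drv_state *
tls_example_drv_state(struct tls_offload_context_tx *tx_ctx)
{
	/* must fit in the room the TLS layer reserved for drivers */
	BUILD_BUG_ON(sizeof(struct tls_example_drv_state) >
		     TLS_DRIVER_STATE_SIZE_TX);
	return (struct tls_example_drv_state *)tx_ctx->driver_state;
}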
enum tls_context_flags {
	TLS_RX_SYNC_RUNNING = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL	2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL	128

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
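/* Illustrative sketch, not a kernel API: how the size fields in
 * struct tls_prot_info compose for TLS 1.2 AES-GCM-128. There,
 * prepend_size is TLS_HEADER_SIZE plus the 8-byte explicit nonce and
 * overhead_size is prepend_size + tag_size, so a record carrying
 * plaintext_len bytes occupies plaintext_len + overhead_size bytes on
 * the wire; the helper below merely restates that relation.
 */
static inline size_t tls_example_record_wire_size(const struct tls_prot_info *prot,
						  size_t plaintext_len)
{
	/* header + explicit nonce (prepend), payload, then the AEAD tag */
	return prot->prepend_size + plaintext_len + prot->tag_size;
}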
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() never returns NULL; use the _or_null variant
	 * so the empty-list check below actually works.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
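/* Illustrative sketch, not a kernel API: how a device driver's transmit path
 * might map a retransmitted TCP sequence number back to the TLS record that
 * covers it, using tls_get_record() under the lock that protects the records
 * list. The "start marker" record denotes the pre-offload portion of the
 * stream, which carries no TLS data.
 */
static inline u64
tls_example_seq_to_record_sn(struct tls_offload_context_tx *tx_ctx,
			     u32 tcp_seq)
{
	struct tls_record_info *record;
	unsigned long flags;
	u64 record_sn = 0;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &record_sn);
	if (record && !tls_record_is_start_marker(record)) {
		/* offset of the retransmitted byte inside the record */
		u32 offset = tcp_seq - tls_record_start_seq(record);

		(void)offset;	/* a real driver would reprogram the NIC here */
	}
	spin_unlock_irqrestore(&tx_ctx->lock, flags);

	return record_sn;
}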
static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	/* true if the big-endian counter wrapped around to zero */
	return (i == -1);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* We cover the explicit nonce here as well, so buf must be at
	 * least TLS_HEADER_SIZE + iv_size bytes.
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that the version must be TLS_1_2 for both TLS 1.2 and TLS 1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* the length field covers the explicit nonce, per the spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
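/* Illustrative sketch, not a kernel API: a minimal TLS 1.2 TX step pairs
 * tls_fill_prepend(), which writes the record header and explicit nonce into
 * the prepend space, with tls_advance_record_sn(), which bumps the record
 * sequence number (and, pre-1.3, the explicit nonce) once the record has
 * been handed to the AEAD.
 */
static inline void tls_example_tx_prepend(struct sock *sk,
					  struct tls_context *ctx,
					  char *prepend,
					  size_t plaintext_len)
{
	struct tls_prot_info *prot = &ctx->prot_info;

	tls_fill_prepend(ctx, prepend, plaintext_len, TLS_RECORD_TYPE_DATA,
			 prot->version);
	/* ... encrypt and queue the record here ... */
	tls_advance_record_sn(sk, prot, &ctx->tx);
}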
static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}


static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif

/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	/* TCP seq in the high 32 bits, request flag in the low bit */
	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

static inline void tls_offload_tx_resync_request(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb);
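/* Illustrative sketch, not a kernel API: in TLS 1.3 the per-record AEAD nonce
 * is the static IV XOR-ed with the 64-bit record sequence number (RFC 8446,
 * section 5.3), which is what xor_iv_with_seq() implements for bytes 4..11 of
 * the salt||IV buffer. Work on a copy so the static IV in the cipher context
 * stays unmodified for the next record.
 */
static inline void tls_example_tls13_nonce(struct tls_context *ctx,
					   char *nonce /* MAX_IV_SIZE bytes */)
{
	memcpy(nonce, ctx->tx.iv, MAX_IV_SIZE);
	xor_iv_with_seq(TLS_1_3_VERSION, nonce, ctx->tx.rec_seq);
}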
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */
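/* Usage sketch (illustrative, not kernel API): a driver's ndo_start_xmit()
 * typically detects kTLS TX offload sockets roughly like this, relying on the
 * core having installed tls_validate_xmit_skb() in sk->sk_validate_xmit_skb
 * as the software fallback path:
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) {
 *		struct my_tls_state *st =
 *			tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
 *		... program the NIC from st ...
 *	}
 *
 * 'struct my_tls_state' is a made-up driver structure; it must fit within the
 * TLS_DRIVER_STATE_SIZE_TX bytes reserved in driver_state[].
 */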