/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16 bytes of IV are built from four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The width of the 'length' field is encoded in 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
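
/* Illustrative sketch (not part of this header's API): how the CCM IV block
 * could be assembled from the fields above. 'salt' and 'rec_seq' stand in
 * for the implicit and explicit nonce sources and are hypothetical names.
 *
 *	u8 iv[MAX_IV_SIZE];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	// (length width 3) - 1
 *	memcpy(iv + 1, salt, 4);	// implicit nonce from key material
 *	memcpy(iv + 5, rec_seq, 8);	// explicit nonce from the record
 *	// iv[13..15] carry the 3-byte message length, filled by the cipher
 */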

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
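
/* Usage sketch: bump a /proc/net/tls_stat counter from a socket context.
 * LINUX_MIB_TLSDECRYPTERROR is one of the TLS MIB fields defined in
 * uapi/linux/snmp.h; any other TLS MIB field works the same way.
 *
 *	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 */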

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX                                            \
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	TLS_RX_SYNC_RUNNING = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
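
/* Wiring sketch for a kTLS-offload-capable driver. The names foo_tls_add,
 * foo_tls_del, foo_tls_resync and foo_tlsdev_ops are hypothetical, shown
 * only to illustrate how the core finds these callbacks when a kTLS socket
 * is installed on, removed from, or resynced with the device.
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_tls_add,
 *		.tls_dev_del	= foo_tls_del,
 *		.tls_dev_resync	= foo_tls_resync,
 *	};
 *
 *	// at probe time:
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX;
 */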

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
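
/* Usage sketch: how a device driver's TX path might map a (possibly
 * retransmitted) TCP sequence number back to its TLS record. The locking
 * follows the documented role of 'lock'; foo_tx_handle_ooo() is a
 * hypothetical driver helper, not part of this API.
 *
 *	static void foo_tx_handle_ooo(struct tls_offload_context_tx *ctx,
 *				      u32 tcp_seq)
 *	{
 *		struct tls_record_info *record;
 *		unsigned long flags;
 *		u64 record_sn;
 *
 *		spin_lock_irqsave(&ctx->lock, flags);
 *		record = tls_get_record(ctx, tcp_seq, &record_sn);
 *		if (record && !tls_record_is_start_marker(record))
 *			;	// re-derive crypto state for record_sn here
 *		spin_unlock_irqrestore(&ctx->lock, flags);
 *	}
 */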

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() never returns NULL; use the _or_null variant
	 * so an empty tx_list is actually detected.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
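
/* Usage sketch: a driver's ndo_start_xmit() can use this check to steer
 * TLS-offloaded skbs onto a dedicated TX path. foo_start_xmit(),
 * foo_xmit_tls() and foo_xmit_plain() are hypothetical names.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *			return foo_xmit_tls(skb, dev);
 *		return foo_xmit_plain(skb, dev);
 *	}
 */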

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}
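
/* Worked example: tls_bigint_increment() treats 'seq' as a big-endian
 * multi-byte counter and reports wrap-around, which callers turn into
 * EBADMSG. A sketch of the semantics (values are illustrative):
 *
 *	unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
 *
 *	tls_bigint_increment(seq, 8);	// seq ends ...0x01 0x00, returns false
 *	memset(seq, 0xff, 8);
 *	tls_bigint_increment(seq, 8);	// wraps to all-zero, returns true
 */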

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		/* we can use the IV as the explicit nonce, per the spec */
		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size TLS_HEADER_SIZE + the explicit nonce size
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
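
/* Worked example: for TLS 1.2 AES-GCM-128 (8-byte explicit nonce, 16-byte
 * tag), a 100-byte plaintext gives pkt_len = 100 + 16 + 8 = 124 (0x7c),
 * so the 5-byte header written above is:
 *
 *	0x17 0x03 0x03 0x00 0x7c
 *	type | version | length
 */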

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
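
/* Resulting AAD layout, for reference. TLS 1.2 uses all 13 bytes of
 * TLS_AAD_SPACE_SIZE; TLS 1.3 authenticates only the 5 header bytes
 * (with 'size' grown by the tag length):
 *
 *	TLS 1.2: seq[8] || type[1] || version[2] || length[2]
 *	TLS 1.3:           type[1] || version[2] || length[2]
 */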

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}
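
/* This implements the TLS 1.3 per-record nonce derivation (RFC 8446,
 * section 5.3): the 8-byte record sequence number, left-padded to the IV
 * length, is XORed into the static IV. Here iv[0..3] holds the salt, so
 * the sequence lands at offset 4:
 *
 *	nonce = (salt[4] || iv[8])  XOR  (0x00000000 || seq[8])
 */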

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif
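
/* Usage sketch: a driver stashes its per-connection state in driver_state.
 * 'struct foo_tls_state' and foo_tls_state() are hypothetical; the
 * BUILD_BUG_ON mirrors how drivers ensure their state fits within the
 * reserved TLS_DRIVER_STATE_SIZE_TX.
 *
 *	struct foo_tls_state {
 *		u32 hw_flow_id;
 *	};
 *
 *	static struct foo_tls_state *foo_tls_state(struct sock *sk)
 *	{
 *		BUILD_BUG_ON(sizeof(struct foo_tls_state) >
 *			     TLS_DRIVER_STATE_SIZE_TX);
 *		return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *	}
 */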

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
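
/* The word packs the TCP sequence of the suspected record header into the
 * upper 32 bits and request flags into the lower bits. A sketch of how the
 * core side can consume it (compare net/tls/tls_device.c):
 *
 *	u64 resync_req = atomic64_read(&rx_ctx->resync_req);
 *	u32 req_seq = resync_req >> 32;
 *	bool is_req_pending = resync_req & RESYNC_REQ;
 */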

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */