xref: /openbmc/linux/include/net/tls.h (revision 4e40e624)
13c4d7559SDave Watson /*
23c4d7559SDave Watson  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
33c4d7559SDave Watson  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
43c4d7559SDave Watson  *
53c4d7559SDave Watson  * This software is available to you under a choice of one of two
63c4d7559SDave Watson  * licenses.  You may choose to be licensed under the terms of the GNU
73c4d7559SDave Watson  * General Public License (GPL) Version 2, available from the file
83c4d7559SDave Watson  * COPYING in the main directory of this source tree, or the
93c4d7559SDave Watson  * OpenIB.org BSD license below:
103c4d7559SDave Watson  *
113c4d7559SDave Watson  *     Redistribution and use in source and binary forms, with or
123c4d7559SDave Watson  *     without modification, are permitted provided that the following
133c4d7559SDave Watson  *     conditions are met:
143c4d7559SDave Watson  *
153c4d7559SDave Watson  *      - Redistributions of source code must retain the above
163c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
173c4d7559SDave Watson  *        disclaimer.
183c4d7559SDave Watson  *
193c4d7559SDave Watson  *      - Redistributions in binary form must reproduce the above
203c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
213c4d7559SDave Watson  *        disclaimer in the documentation and/or other materials
223c4d7559SDave Watson  *        provided with the distribution.
233c4d7559SDave Watson  *
243c4d7559SDave Watson  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
253c4d7559SDave Watson  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
263c4d7559SDave Watson  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
273c4d7559SDave Watson  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
283c4d7559SDave Watson  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
293c4d7559SDave Watson  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
303c4d7559SDave Watson  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
313c4d7559SDave Watson  * SOFTWARE.
323c4d7559SDave Watson  */
333c4d7559SDave Watson 
343c4d7559SDave Watson #ifndef _TLS_OFFLOAD_H
353c4d7559SDave Watson #define _TLS_OFFLOAD_H
363c4d7559SDave Watson 
373c4d7559SDave Watson #include <linux/types.h>
38b9f3eb49SDmitry V. Levin #include <asm/byteorder.h>
39a54667f6SVakul Garg #include <linux/crypto.h>
40b9f3eb49SDmitry V. Levin #include <linux/socket.h>
41b9f3eb49SDmitry V. Levin #include <linux/tcp.h>
4279ffe608SJakub Kicinski #include <linux/mutex.h>
432e361176SJakub Kicinski #include <linux/netdevice.h>
4415a7dea7SJakub Kicinski #include <linux/rcupdate.h>
45d829e9c4SDaniel Borkmann 
46d26b698dSJakub Kicinski #include <net/net_namespace.h>
47b9f3eb49SDmitry V. Levin #include <net/tcp.h>
48c46234ebSDave Watson #include <net/strparser.h>
49a42055e8SVakul Garg #include <crypto/aead.h>
503c4d7559SDave Watson #include <uapi/linux/tls.h>
513c4d7559SDave Watson 
struct tls_rec;

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

/* Size of the TLS record header that precedes every record on the wire */
#define TLS_HEADER_SIZE			5
/* The per-record nonce follows immediately after the record header */
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

/* crypto_info counts as populated once a cipher type has been set */
#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2
78f295b3aeSVakul Garg 
/* Per-direction configuration states (stored in tls_context tx_conf/rx_conf) */
enum {
	TLS_BASE,	/* TLS ULP attached, no crypto state configured */
	TLS_SW,		/* record crypto done in software by the kernel */
	TLS_HW,		/* record crypto offloaded to the device */
	TLS_HW_RECORD,	/* NOTE(review): presumably full record handling in HW — confirm */
	TLS_NUM_CONFIG,
};
864799ac81SBoris Pismenny 
/* Bundles the delayed TX work item with the socket it services */
struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};
91a42055e8SVakul Garg 
/* Software (kernel crypto) TX state, hung off tls_context::priv_ctx_tx */
struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;	/* completion for synchronous waits */
	struct tx_work tx_work;
	struct tls_rec *open_rec;	/* record currently being filled */
	struct list_head tx_list;
	atomic_t encrypt_pending;	/* in-flight async encrypt operations */
	u8 async_capable:1;

/* bit numbers within tx_bitmask */
#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};
1053c4d7559SDave Watson 
/* Stream-parser state used to carve TLS records out of the TCP stream */
struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 mixed_decrypted : 1;

	bool msg_ready;		/* a complete record is available */

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};
12184c61fe1SJakub Kicinski 
/* Software (kernel crypto) RX state, hung off tls_context::priv_ctx_rx */
struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;	/* completion for synchronous waits */
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);	/* saved sk_data_ready callback */

	u8 reader_present;
	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;

	struct tls_strparser strp;	/* record framing state */

	atomic_t decrypt_pending;	/* in-flight async decrypt operations */
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};
13994524d8fSVakul Garg 
/* One TX record tracked by the device-offload path */
struct tls_record_info {
	struct list_head list;
	u32 end_seq;	/* TCP sequence just past this record; start is
			 * end_seq - len (see tls_record_start_seq())
			 */
	int len;	/* 0 identifies the list's start marker */
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};
147e8f69799SIlya Lesokhin 
/* Device-offload TX state, hung off tls_context::priv_ctx_tx */
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;	/* struct tls_record_info entries */
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

/* Allocation size: fixed part plus the reserved driver_state area */
#define TLS_OFFLOAD_CONTEXT_SIZE_TX                                            \
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
171e8f69799SIlya Lesokhin 
/* Bit numbers used in tls_context::flags */
enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX where resync is driven entirely by the core in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};
190e52972c1SJakub Kicinski 
/* Per-direction IV and record sequence number storage */
struct cipher_context {
	char *iv;
	char *rec_seq;
};
195dbe42559SDave Watson 
/* Cipher parameters; 'info' aliases the common header shared by every
 * supported tls12_crypto_info_* layout.
 */
union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};
20686029d10SSabrina Dubroca 
/* Protocol constants derived from the negotiated version and cipher,
 * kept in the read-mostly part of tls_context.
 */
struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
2194509de14SVakul Garg 
/* Main per-socket TLS state; reached through inet_csk(sk)->icsk_ulp_data
 * (see tls_get_ctx()).
 */
struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;		/* one of the TLS_* config enum values */
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;	/* tls_sw_context_tx or tls_offload_context_tx */
	void *priv_ctx_rx;	/* tls_sw_context_rx or tls_offload_context_rx */

	struct net_device __rcu *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool splicing_pages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;	/* enum tls_context_flags bits */

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};
2653c4d7559SDave Watson 
/* Direction a device-offload context applies to */
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

/* Operations a network driver implements to support kTLS device offload */
struct tlsdev_ops {
	/* install offload state for one direction of a connection,
	 * starting at TCP sequence start_offload_tcp_sn
	 */
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	/* re-synchronize the device's record tracking at TCP seq/rcd_sn */
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
283da68b4adSJakub Kicinski 
/* Which side drives RX resync (see union in tls_offload_context_rx) */
enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
/* State for DRIVER_REQ_ASYNC resync; 'req' packs seq/len/flags in one
 * atomic word (see tls_offload_rx_resync_async_request_start()).
 */
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;		/* entries used in log[] */
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
300ed9b7646SBoris Pismenny 
/* Device-offload RX state, hung off tls_context::priv_ctx_rx */
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

/* Allocation size: fixed part plus the reserved driver_state area */
#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
3344799ac81SBoris Pismenny 
335d80a1b9dSBoris Pismenny struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
336e8f69799SIlya Lesokhin 				       u32 seq, u64 *p_record_sn);
337e8f69799SIlya Lesokhin 
tls_record_is_start_marker(struct tls_record_info * rec)338e8f69799SIlya Lesokhin static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
339e8f69799SIlya Lesokhin {
340e8f69799SIlya Lesokhin 	return rec->len == 0;
341e8f69799SIlya Lesokhin }
342e8f69799SIlya Lesokhin 
tls_record_start_seq(struct tls_record_info * rec)343e8f69799SIlya Lesokhin static inline u32 tls_record_start_seq(struct tls_record_info *rec)
344e8f69799SIlya Lesokhin {
345e8f69799SIlya Lesokhin 	return rec->end_seq - rec->len;
346e8f69799SIlya Lesokhin }
347e8f69799SIlya Lesokhin 
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

/* True if @skb belongs to a full socket whose TX validation hook is the
 * TLS device-offload one; always false without CONFIG_TLS_DEVICE.
 */
static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	struct sock *sk = skb->sk;

	/* acquire pairs with the store that installs the hook */
	return sk && sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
367e8f69799SIlya Lesokhin 
/* Fetch the TLS context stored in the connection's ULP data pointer */
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}
3774509de14SVakul Garg 
tls_sw_ctx_rx(const struct tls_context * tls_ctx)378f66de3eeSBoris Pismenny static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
3793c4d7559SDave Watson 		const struct tls_context *tls_ctx)
3803c4d7559SDave Watson {
381f66de3eeSBoris Pismenny 	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
382f66de3eeSBoris Pismenny }
383f66de3eeSBoris Pismenny 
tls_sw_ctx_tx(const struct tls_context * tls_ctx)384f66de3eeSBoris Pismenny static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
385f66de3eeSBoris Pismenny 		const struct tls_context *tls_ctx)
386f66de3eeSBoris Pismenny {
387f66de3eeSBoris Pismenny 	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
3883c4d7559SDave Watson }
3893c4d7559SDave Watson 
390d80a1b9dSBoris Pismenny static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context * tls_ctx)391d80a1b9dSBoris Pismenny tls_offload_ctx_tx(const struct tls_context *tls_ctx)
3923c4d7559SDave Watson {
393d80a1b9dSBoris Pismenny 	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
3943c4d7559SDave Watson }
3953c4d7559SDave Watson 
tls_sw_has_ctx_tx(const struct sock * sk)3960608c69cSJohn Fastabend static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
3970608c69cSJohn Fastabend {
3980608c69cSJohn Fastabend 	struct tls_context *ctx = tls_get_ctx(sk);
3990608c69cSJohn Fastabend 
4000608c69cSJohn Fastabend 	if (!ctx)
4010608c69cSJohn Fastabend 		return false;
4020608c69cSJohn Fastabend 	return !!tls_sw_ctx_tx(ctx);
4030608c69cSJohn Fastabend }
4040608c69cSJohn Fastabend 
tls_sw_has_ctx_rx(const struct sock * sk)405e91de6afSJohn Fastabend static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
406e91de6afSJohn Fastabend {
407e91de6afSJohn Fastabend 	struct tls_context *ctx = tls_get_ctx(sk);
408e91de6afSJohn Fastabend 
409e91de6afSJohn Fastabend 	if (!ctx)
410e91de6afSJohn Fastabend 		return false;
411e91de6afSJohn Fastabend 	return !!tls_sw_ctx_rx(ctx);
412e91de6afSJohn Fastabend }
413e91de6afSJohn Fastabend 
4144799ac81SBoris Pismenny static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context * tls_ctx)4154799ac81SBoris Pismenny tls_offload_ctx_rx(const struct tls_context *tls_ctx)
4164799ac81SBoris Pismenny {
4174799ac81SBoris Pismenny 	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
4184799ac81SBoris Pismenny }
4194799ac81SBoris Pismenny 
__tls_driver_ctx(struct tls_context * tls_ctx,enum tls_offload_ctx_dir direction)4202e361176SJakub Kicinski static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
4212e361176SJakub Kicinski 				     enum tls_offload_ctx_dir direction)
4222e361176SJakub Kicinski {
4232e361176SJakub Kicinski 	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
4242e361176SJakub Kicinski 		return tls_offload_ctx_tx(tls_ctx)->driver_state;
4252e361176SJakub Kicinski 	else
4262e361176SJakub Kicinski 		return tls_offload_ctx_rx(tls_ctx)->driver_state;
4272e361176SJakub Kicinski }
4282e361176SJakub Kicinski 
4292e361176SJakub Kicinski static inline void *
tls_driver_ctx(const struct sock * sk,enum tls_offload_ctx_dir direction)4302e361176SJakub Kicinski tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
4312e361176SJakub Kicinski {
4322e361176SJakub Kicinski 	return __tls_driver_ctx(tls_get_ctx(sk), direction);
4332e361176SJakub Kicinski }
4342e361176SJakub Kicinski 
/* flag bits packed into the low word of the resync request */
#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	/* pack seq[63:32] | RESYNC_REQ into one word so readers get a
	 * consistent snapshot from a single atomic read
	 */
	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	/* req layout: seq[63:32] | len[31:16] | flags */
	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	/* drop the ASYNC flag; only the plain resync request stays set */
	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
4684799ac81SBoris Pismenny 
469f953d33bSJakub Kicinski static inline void
tls_offload_rx_resync_set_type(struct sock * sk,enum tls_offload_sync_type type)470f953d33bSJakub Kicinski tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
471f953d33bSJakub Kicinski {
472f953d33bSJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
473f953d33bSJakub Kicinski 
474f953d33bSJakub Kicinski 	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
475f953d33bSJakub Kicinski }
4764799ac81SBoris Pismenny 
/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	/* order the flag read against the caller's subsequent accesses;
	 * NOTE(review): presumably pairs with a barrier on the side that
	 * sets TLS_TX_SYNC_SCHED — confirm in tls_device.c
	 */
	smp_mb__after_atomic();
	return ret;
}
48750180074SJakub Kicinski 
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);

/* True if @sk is a full socket whose destructor is the TLS device one
 * and whose RX side is configured for device offload.
 */
static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	/* acquire pairs with the store that installs the destructor */
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#endif
#endif /* _TLS_OFFLOAD_H */
503